Coverage Report

Created: 2017-11-23 03:11

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/tools/polly/lib/External/isl/isl_schedule_node.c
Line
Count
Source (jump to first uncovered line)
1
/*
2
 * Copyright 2013-2014 Ecole Normale Superieure
3
 * Copyright 2014      INRIA Rocquencourt
4
 * Copyright 2016      Sven Verdoolaege
5
 *
6
 * Use of this software is governed by the MIT license
7
 *
8
 * Written by Sven Verdoolaege,
9
 * Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
10
 * and Inria Paris - Rocquencourt, Domaine de Voluceau - Rocquencourt,
11
 * B.P. 105 - 78153 Le Chesnay, France
12
 */
13
14
#include <isl/set.h>
15
#include <isl_schedule_band.h>
16
#include <isl_schedule_private.h>
17
#include <isl_schedule_node_private.h>
18
19
/* Create a new schedule node in the given schedule, point at the given
20
 * tree with given ancestors and child positions.
21
 * "child_pos" may be NULL if there are no ancestors.
22
 */
23
__isl_give isl_schedule_node *isl_schedule_node_alloc(
24
  __isl_take isl_schedule *schedule, __isl_take isl_schedule_tree *tree,
25
  __isl_take isl_schedule_tree_list *ancestors, int *child_pos)
26
59.9k
{
27
59.9k
  isl_ctx *ctx;
28
59.9k
  isl_schedule_node *node;
29
59.9k
  int i, n;
30
59.9k
31
59.9k
  if (!schedule || !tree || !ancestors)
32
0
    goto error;
33
59.9k
  n = isl_schedule_tree_list_n_schedule_tree(ancestors);
34
59.9k
  if (n > 0 && 
!child_pos38.8k
)
35
0
    goto error;
36
59.9k
  ctx = isl_schedule_get_ctx(schedule);
37
59.9k
  node = isl_calloc_type(ctx, isl_schedule_node);
38
59.9k
  if (!node)
39
0
    goto error;
40
59.9k
  node->ref = 1;
41
59.9k
  node->schedule = schedule;
42
59.9k
  node->tree = tree;
43
59.9k
  node->ancestors = ancestors;
44
59.9k
  node->child_pos = isl_alloc_array(ctx, int, n);
45
59.9k
  if (n && 
!node->child_pos38.8k
)
46
0
    return isl_schedule_node_free(node);
47
245k
  
for (i = 0; 59.9k
i < n;
++i185k
)
48
185k
    node->child_pos[i] = child_pos[i];
49
59.9k
50
59.9k
  return node;
51
0
error:
52
0
  isl_schedule_free(schedule);
53
0
  isl_schedule_tree_free(tree);
54
0
  isl_schedule_tree_list_free(ancestors);
55
0
  return NULL;
56
59.9k
}
57
58
/* Return a pointer to the root of a schedule tree with as single
59
 * node a domain node with the given domain.
60
 */
61
__isl_give isl_schedule_node *isl_schedule_node_from_domain(
62
  __isl_take isl_union_set *domain)
63
126
{
64
126
  isl_schedule *schedule;
65
126
  isl_schedule_node *node;
66
126
67
126
  schedule = isl_schedule_from_domain(domain);
68
126
  node = isl_schedule_get_root(schedule);
69
126
  isl_schedule_free(schedule);
70
126
71
126
  return node;
72
126
}
73
74
/* Return a pointer to the root of a schedule tree with as single
75
 * node a extension node with the given extension.
76
 */
77
__isl_give isl_schedule_node *isl_schedule_node_from_extension(
78
  __isl_take isl_union_map *extension)
79
24
{
80
24
  isl_ctx *ctx;
81
24
  isl_schedule *schedule;
82
24
  isl_schedule_tree *tree;
83
24
  isl_schedule_node *node;
84
24
85
24
  if (!extension)
86
0
    return NULL;
87
24
88
24
  ctx = isl_union_map_get_ctx(extension);
89
24
  tree = isl_schedule_tree_from_extension(extension);
90
24
  schedule = isl_schedule_from_schedule_tree(ctx, tree);
91
24
  node = isl_schedule_get_root(schedule);
92
24
  isl_schedule_free(schedule);
93
24
94
24
  return node;
95
24
}
96
97
/* Return the isl_ctx to which "node" belongs.
98
 */
99
isl_ctx *isl_schedule_node_get_ctx(__isl_keep isl_schedule_node *node)
100
41.5k
{
101
41.5k
  return node ? isl_schedule_get_ctx(node->schedule) : NULL;
102
41.5k
}
103
104
/* Return a pointer to the leaf of the schedule into which "node" points.
105
 */
106
__isl_keep isl_schedule_tree *isl_schedule_node_peek_leaf(
107
  __isl_keep isl_schedule_node *node)
108
20.2k
{
109
20.2k
  return node ? isl_schedule_peek_leaf(node->schedule) : NULL;
110
20.2k
}
111
112
/* Return a copy of the leaf of the schedule into which "node" points.
113
 */
114
__isl_give isl_schedule_tree *isl_schedule_node_get_leaf(
115
  __isl_keep isl_schedule_node *node)
116
18.7k
{
117
18.7k
  return isl_schedule_tree_copy(isl_schedule_node_peek_leaf(node));
118
18.7k
}
119
120
/* Return the type of the node or isl_schedule_node_error on error.
121
 */
122
enum isl_schedule_node_type isl_schedule_node_get_type(
123
  __isl_keep isl_schedule_node *node)
124
113k
{
125
113k
  return node ? isl_schedule_tree_get_type(node->tree)
126
113k
        : 
isl_schedule_node_error0
;
127
113k
}
128
129
/* Return the type of the parent of "node" or isl_schedule_node_error on error.
130
 */
131
enum isl_schedule_node_type isl_schedule_node_get_parent_type(
132
  __isl_keep isl_schedule_node *node)
133
2.98k
{
134
2.98k
  int pos;
135
2.98k
  int has_parent;
136
2.98k
  isl_schedule_tree *parent;
137
2.98k
  enum isl_schedule_node_type type;
138
2.98k
139
2.98k
  if (!node)
140
0
    return isl_schedule_node_error;
141
2.98k
  has_parent = isl_schedule_node_has_parent(node);
142
2.98k
  if (has_parent < 0)
143
0
    return isl_schedule_node_error;
144
2.98k
  if (!has_parent)
145
2.98k
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
146
2.98k
      "node has no parent", return isl_schedule_node_error);
147
2.98k
148
2.98k
  pos = isl_schedule_tree_list_n_schedule_tree(node->ancestors) - 1;
149
2.98k
  parent = isl_schedule_tree_list_get_schedule_tree(node->ancestors, pos);
150
2.98k
  type = isl_schedule_tree_get_type(parent);
151
2.98k
  isl_schedule_tree_free(parent);
152
2.98k
153
2.98k
  return type;
154
2.98k
}
155
156
/* Return a copy of the subtree that this node points to.
157
 */
158
__isl_give isl_schedule_tree *isl_schedule_node_get_tree(
159
  __isl_keep isl_schedule_node *node)
160
5.84k
{
161
5.84k
  if (!node)
162
0
    return NULL;
163
5.84k
164
5.84k
  return isl_schedule_tree_copy(node->tree);
165
5.84k
}
166
167
/* Return a copy of the schedule into which "node" points.
168
 */
169
__isl_give isl_schedule *isl_schedule_node_get_schedule(
170
  __isl_keep isl_schedule_node *node)
171
6.21k
{
172
6.21k
  if (!node)
173
0
    return NULL;
174
6.21k
  return isl_schedule_copy(node->schedule);
175
6.21k
}
176
177
/* Return a fresh copy of "node".
178
 */
179
__isl_take isl_schedule_node *isl_schedule_node_dup(
180
  __isl_keep isl_schedule_node *node)
181
45.3k
{
182
45.3k
  if (!node)
183
0
    return NULL;
184
45.3k
185
45.3k
  return isl_schedule_node_alloc(isl_schedule_copy(node->schedule),
186
45.3k
        isl_schedule_tree_copy(node->tree),
187
45.3k
        isl_schedule_tree_list_copy(node->ancestors),
188
45.3k
        node->child_pos);
189
45.3k
}
190
191
/* Return an isl_schedule_node that is equal to "node" and that has only
192
 * a single reference.
193
 */
194
__isl_give isl_schedule_node *isl_schedule_node_cow(
195
  __isl_take isl_schedule_node *node)
196
125k
{
197
125k
  if (!node)
198
0
    return NULL;
199
125k
200
125k
  if (node->ref == 1)
201
79.8k
    return node;
202
45.3k
  node->ref--;
203
45.3k
  return isl_schedule_node_dup(node);
204
45.3k
}
205
206
/* Return a new reference to "node".
207
 */
208
__isl_give isl_schedule_node *isl_schedule_node_copy(
209
  __isl_keep isl_schedule_node *node)
210
115k
{
211
115k
  if (!node)
212
12.6k
    return NULL;
213
102k
214
102k
  node->ref++;
215
102k
  return node;
216
102k
}
217
218
/* Free "node" and return NULL.
219
 */
220
__isl_null isl_schedule_node *isl_schedule_node_free(
221
  __isl_take isl_schedule_node *node)
222
132k
{
223
132k
  if (!node)
224
15.3k
    return NULL;
225
117k
  if (--node->ref > 0)
226
57.2k
    return NULL;
227
59.9k
228
59.9k
  isl_schedule_tree_list_free(node->ancestors);
229
59.9k
  free(node->child_pos);
230
59.9k
  isl_schedule_tree_free(node->tree);
231
59.9k
  isl_schedule_free(node->schedule);
232
59.9k
  free(node);
233
59.9k
234
59.9k
  return NULL;
235
59.9k
}
236
237
/* Do "node1" and "node2" point to the same position in the same
238
 * schedule?
239
 */
240
isl_bool isl_schedule_node_is_equal(__isl_keep isl_schedule_node *node1,
241
  __isl_keep isl_schedule_node *node2)
242
59.2k
{
243
59.2k
  int i, n1, n2;
244
59.2k
245
59.2k
  if (!node1 || !node2)
246
0
    return isl_bool_error;
247
59.2k
  if (node1 == node2)
248
28.0k
    return isl_bool_true;
249
31.2k
  if (node1->schedule != node2->schedule)
250
0
    return isl_bool_false;
251
31.2k
252
31.2k
  n1 = isl_schedule_node_get_tree_depth(node1);
253
31.2k
  n2 = isl_schedule_node_get_tree_depth(node2);
254
31.2k
  if (n1 != n2)
255
6.27k
    return isl_bool_false;
256
57.7k
  
for (i = 0; 24.9k
i < n1;
++i32.7k
)
257
57.7k
    if (node1->child_pos[i] != node2->child_pos[i])
258
24.9k
      return isl_bool_false;
259
24.9k
260
24.9k
  
return isl_bool_true0
;
261
59.2k
}
262
263
/* Return the number of outer schedule dimensions of "node"
264
 * in its schedule tree.
265
 *
266
 * Return -1 on error.
267
 */
268
int isl_schedule_node_get_schedule_depth(__isl_keep isl_schedule_node *node)
269
61.6k
{
270
61.6k
  int i, n;
271
61.6k
  int depth = 0;
272
61.6k
273
61.6k
  if (!node)
274
0
    return -1;
275
61.6k
276
61.6k
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
277
234k
  for (i = n - 1; i >= 0; 
--i172k
) {
278
172k
    isl_schedule_tree *tree;
279
172k
280
172k
    tree = isl_schedule_tree_list_get_schedule_tree(
281
172k
                node->ancestors, i);
282
172k
    if (!tree)
283
0
      return -1;
284
172k
    if (tree->type == isl_schedule_node_band)
285
59.3k
      depth += isl_schedule_tree_band_n_member(tree);
286
172k
    isl_schedule_tree_free(tree);
287
172k
  }
288
61.6k
289
61.6k
  return depth;
290
61.6k
}
291
292
/* Internal data structure for
293
 * isl_schedule_node_get_prefix_schedule_union_pw_multi_aff
294
 *
295
 * "initialized" is set if the filter field has been initialized.
296
 * If "universe_domain" is not set, then the collected filter is intersected
297
 * with the the domain of the root domain node.
298
 * "universe_filter" is set if we are only collecting the universes of filters
299
 * "collect_prefix" is set if we are collecting prefixes.
300
 * "filter" collects all outer filters and is NULL until "initialized" is set.
301
 * "prefix" collects all outer band partial schedules (if "collect_prefix"
302
 * is set).  If it is used, then it is initialized by the caller
303
 * of collect_filter_prefix to a zero-dimensional function.
304
 */
305
struct isl_schedule_node_get_filter_prefix_data {
306
  int initialized;
307
  int universe_domain;
308
  int universe_filter;
309
  int collect_prefix;
310
  isl_union_set *filter;
311
  isl_multi_union_pw_aff *prefix;
312
};
313
314
static int collect_filter_prefix(__isl_keep isl_schedule_tree_list *list,
315
  int n, struct isl_schedule_node_get_filter_prefix_data *data);
316
317
/* Update the filter and prefix information in "data" based on the first "n"
318
 * elements in "list" and the expansion tree root "tree".
319
 *
320
 * We first collect the information from the elements in "list",
321
 * initializing the filter based on the domain of the expansion.
322
 * Then we map the results to the expanded space and combined them
323
 * with the results already in "data".
324
 */
325
static int collect_filter_prefix_expansion(__isl_take isl_schedule_tree *tree,
326
  __isl_keep isl_schedule_tree_list *list, int n,
327
  struct isl_schedule_node_get_filter_prefix_data *data)
328
6
{
329
6
  struct isl_schedule_node_get_filter_prefix_data contracted;
330
6
  isl_union_pw_multi_aff *c;
331
6
  isl_union_map *exp, *universe;
332
6
  isl_union_set *filter;
333
6
334
6
  c = isl_schedule_tree_expansion_get_contraction(tree);
335
6
  exp = isl_schedule_tree_expansion_get_expansion(tree);
336
6
337
6
  contracted.initialized = 1;
338
6
  contracted.universe_domain = data->universe_domain;
339
6
  contracted.universe_filter = data->universe_filter;
340
6
  contracted.collect_prefix = data->collect_prefix;
341
6
  universe = isl_union_map_universe(isl_union_map_copy(exp));
342
6
  filter = isl_union_map_domain(universe);
343
6
  if (data->collect_prefix) {
344
3
    isl_space *space = isl_union_set_get_space(filter);
345
3
    space = isl_space_set_from_params(space);
346
3
    contracted.prefix = isl_multi_union_pw_aff_zero(space);
347
3
  }
348
6
  contracted.filter = filter;
349
6
350
6
  if (collect_filter_prefix(list, n, &contracted) < 0)
351
0
    contracted.filter = isl_union_set_free(contracted.filter);
352
6
  if (data->collect_prefix) {
353
3
    isl_multi_union_pw_aff *prefix;
354
3
355
3
    prefix = contracted.prefix;
356
3
    prefix =
357
3
        isl_multi_union_pw_aff_pullback_union_pw_multi_aff(prefix,
358
3
            isl_union_pw_multi_aff_copy(c));
359
3
    data->prefix = isl_multi_union_pw_aff_flat_range_product(
360
3
            prefix, data->prefix);
361
3
  }
362
6
  filter = contracted.filter;
363
6
  if (data->universe_domain)
364
3
    filter = isl_union_set_preimage_union_pw_multi_aff(filter,
365
3
            isl_union_pw_multi_aff_copy(c));
366
3
  else
367
3
    filter = isl_union_set_apply(filter, isl_union_map_copy(exp));
368
6
  if (!data->initialized)
369
1
    data->filter = filter;
370
5
  else
371
5
    data->filter = isl_union_set_intersect(filter, data->filter);
372
6
  data->initialized = 1;
373
6
374
6
  isl_union_pw_multi_aff_free(c);
375
6
  isl_union_map_free(exp);
376
6
  isl_schedule_tree_free(tree);
377
6
378
6
  return 0;
379
6
}
380
381
/* Update the filter information in "data" based on the first "n"
382
 * elements in "list" and the extension tree root "tree", in case
383
 * data->universe_domain is set and data->collect_prefix is not.
384
 *
385
 * We collect the universe domain of the elements in "list" and
386
 * add it to the universe range of the extension (intersected
387
 * with the already collected filter, if any).
388
 */
389
static int collect_universe_domain_extension(__isl_take isl_schedule_tree *tree,
390
  __isl_keep isl_schedule_tree_list *list, int n,
391
  struct isl_schedule_node_get_filter_prefix_data *data)
392
12
{
393
12
  struct isl_schedule_node_get_filter_prefix_data data_outer;
394
12
  isl_union_map *extension;
395
12
  isl_union_set *filter;
396
12
397
12
  data_outer.initialized = 0;
398
12
  data_outer.universe_domain = 1;
399
12
  data_outer.universe_filter = data->universe_filter;
400
12
  data_outer.collect_prefix = 0;
401
12
  data_outer.filter = NULL;
402
12
  data_outer.prefix = NULL;
403
12
404
12
  if (collect_filter_prefix(list, n, &data_outer) < 0)
405
0
    data_outer.filter = isl_union_set_free(data_outer.filter);
406
12
407
12
  extension = isl_schedule_tree_extension_get_extension(tree);
408
12
  extension = isl_union_map_universe(extension);
409
12
  filter = isl_union_map_range(extension);
410
12
  if (data_outer.initialized)
411
12
    filter = isl_union_set_union(filter, data_outer.filter);
412
12
  if (data->initialized)
413
12
    filter = isl_union_set_intersect(filter, data->filter);
414
12
415
12
  data->filter = filter;
416
12
417
12
  isl_schedule_tree_free(tree);
418
12
419
12
  return 0;
420
12
}
421
422
/* Update "data" based on the tree node "tree" in case "data" has
423
 * not been initialized yet.
424
 *
425
 * Return 0 on success and -1 on error.
426
 *
427
 * If "tree" is a filter, then we set data->filter to this filter
428
 * (or its universe).
429
 * If "tree" is a domain, then this means we have reached the root
430
 * of the schedule tree without being able to extract any information.
431
 * We therefore initialize data->filter to the universe of the domain,
432
 * or the domain itself if data->universe_domain is not set.
433
 * If "tree" is a band with at least one member, then we set data->filter
434
 * to the universe of the schedule domain and replace the zero-dimensional
435
 * data->prefix by the band schedule (if data->collect_prefix is set).
436
 */
437
static int collect_filter_prefix_init(__isl_keep isl_schedule_tree *tree,
438
  struct isl_schedule_node_get_filter_prefix_data *data)
439
7.63k
{
440
7.63k
  enum isl_schedule_node_type type;
441
7.63k
  isl_multi_union_pw_aff *mupa;
442
7.63k
  isl_union_set *filter;
443
7.63k
444
7.63k
  type = isl_schedule_tree_get_type(tree);
445
7.63k
  switch (type) {
446
7.63k
  case isl_schedule_node_error:
447
0
    return -1;
448
7.63k
  case isl_schedule_node_expansion:
449
0
    isl_die(isl_schedule_tree_get_ctx(tree), isl_error_internal,
450
0
      "should be handled by caller", return -1);
451
0
  case isl_schedule_node_extension:
452
0
    isl_die(isl_schedule_tree_get_ctx(tree), isl_error_invalid,
453
0
      "cannot handle extension nodes", return -1);
454
15
  case isl_schedule_node_context:
455
15
  case isl_schedule_node_leaf:
456
15
  case isl_schedule_node_guard:
457
15
  case isl_schedule_node_mark:
458
15
  case isl_schedule_node_sequence:
459
15
  case isl_schedule_node_set:
460
15
    return 0;
461
324
  case isl_schedule_node_domain:
462
324
    filter = isl_schedule_tree_domain_get_domain(tree);
463
324
    if (data->universe_domain)
464
178
      filter = isl_union_set_universe(filter);
465
324
    data->filter = filter;
466
324
    break;
467
4.77k
  case isl_schedule_node_band:
468
4.77k
    if (isl_schedule_tree_band_n_member(tree) == 0)
469
0
      return 0;
470
4.77k
    mupa = isl_schedule_tree_band_get_partial_schedule(tree);
471
4.77k
    if (data->collect_prefix) {
472
2.37k
      isl_multi_union_pw_aff_free(data->prefix);
473
2.37k
      mupa = isl_multi_union_pw_aff_reset_tuple_id(mupa,
474
2.37k
                isl_dim_set);
475
2.37k
      data->prefix = isl_multi_union_pw_aff_copy(mupa);
476
2.37k
    }
477
4.77k
    filter = isl_multi_union_pw_aff_domain(mupa);
478
4.77k
    filter = isl_union_set_universe(filter);
479
4.77k
    data->filter = filter;
480
4.77k
    break;
481
4.77k
  case isl_schedule_node_filter:
482
2.52k
    filter = isl_schedule_tree_filter_get_filter(tree);
483
2.52k
    if (data->universe_filter)
484
1.26k
      filter = isl_union_set_universe(filter);
485
0
    data->filter = filter;
486
0
    break;
487
7.62k
  }
488
7.62k
489
7.62k
  if ((data->collect_prefix && 
!data->prefix3.77k
) || !data->filter)
490
0
    return -1;
491
7.62k
492
7.62k
  data->initialized = 1;
493
7.62k
494
7.62k
  return 0;
495
7.62k
}
496
497
/* Update "data" based on the tree node "tree" in case "data" has
498
 * already been initialized.
499
 *
500
 * Return 0 on success and -1 on error.
501
 *
502
 * If "tree" is a domain and data->universe_domain is not set, then
503
 * intersect data->filter with the domain.
504
 * If "tree" is a filter, then we intersect data->filter with this filter
505
 * (or its universe).
506
 * If "tree" is a band with at least one member and data->collect_prefix
507
 * is set, then we extend data->prefix with the band schedule.
508
 * If "tree" is an extension, then we make sure that we are not collecting
509
 * information on any extended domain elements.
510
 */
511
static int collect_filter_prefix_update(__isl_keep isl_schedule_tree *tree,
512
  struct isl_schedule_node_get_filter_prefix_data *data)
513
18.6k
{
514
18.6k
  enum isl_schedule_node_type type;
515
18.6k
  isl_multi_union_pw_aff *mupa;
516
18.6k
  isl_union_set *filter;
517
18.6k
  isl_union_map *extension;
518
18.6k
  int empty;
519
18.6k
520
18.6k
  type = isl_schedule_tree_get_type(tree);
521
18.6k
  switch (type) {
522
18.6k
  case isl_schedule_node_error:
523
0
    return -1;
524
18.6k
  case isl_schedule_node_expansion:
525
0
    isl_die(isl_schedule_tree_get_ctx(tree), isl_error_internal,
526
0
      "should be handled by caller", return -1);
527
12
  case isl_schedule_node_extension:
528
12
    extension = isl_schedule_tree_extension_get_extension(tree);
529
12
    extension = isl_union_map_intersect_range(extension,
530
12
          isl_union_set_copy(data->filter));
531
12
    empty = isl_union_map_is_empty(extension);
532
12
    isl_union_map_free(extension);
533
12
    if (empty < 0)
534
0
      return -1;
535
12
    if (empty)
536
12
      break;
537
0
    isl_die(isl_schedule_tree_get_ctx(tree), isl_error_invalid,
538
0
      "cannot handle extension nodes", return -1);
539
4.95k
  case isl_schedule_node_context:
540
4.95k
  case isl_schedule_node_leaf:
541
4.95k
  case isl_schedule_node_guard:
542
4.95k
  case isl_schedule_node_mark:
543
4.95k
  case isl_schedule_node_sequence:
544
4.95k
  case isl_schedule_node_set:
545
4.95k
    break;
546
7.28k
  case isl_schedule_node_domain:
547
7.28k
    if (data->universe_domain)
548
3.64k
      break;
549
3.64k
    filter = isl_schedule_tree_domain_get_domain(tree);
550
3.64k
    data->filter = isl_union_set_intersect(data->filter, filter);
551
3.64k
    break;
552
4.23k
  case isl_schedule_node_band:
553
4.23k
    if (isl_schedule_tree_band_n_member(tree) == 0)
554
0
      break;
555
4.23k
    if (!data->collect_prefix)
556
2.08k
      break;
557
2.15k
    mupa = isl_schedule_tree_band_get_partial_schedule(tree);
558
2.15k
    data->prefix = isl_multi_union_pw_aff_flat_range_product(mupa,
559
2.15k
                data->prefix);
560
2.15k
    if (!data->prefix)
561
0
      return -1;
562
2.15k
    break;
563
2.15k
  case isl_schedule_node_filter:
564
2.15k
    filter = isl_schedule_tree_filter_get_filter(tree);
565
2.15k
    if (data->universe_filter)
566
1.07k
      filter = isl_union_set_universe(filter);
567
2.15k
    data->filter = isl_union_set_intersect(data->filter, filter);
568
2.15k
    if (!data->filter)
569
0
      return -1;
570
2.15k
    break;
571
18.6k
  }
572
18.6k
573
18.6k
  return 0;
574
18.6k
}
575
576
/* Collect filter and/or prefix information from the first "n"
577
 * elements in "list" (which represent the ancestors of a node).
578
 * Store the results in "data".
579
 *
580
 * Extension nodes are only supported if they do not affect the outcome,
581
 * i.e., if we are collecting information on non-extended domain elements,
582
 * or if we are collecting the universe domain (without prefix).
583
 *
584
 * Return 0 on success and -1 on error.
585
 *
586
 * We traverse the list from innermost ancestor (last element)
587
 * to outermost ancestor (first element), calling collect_filter_prefix_init
588
 * on each node as long as we have not been able to extract any information
589
 * yet and collect_filter_prefix_update afterwards.
590
 * If we come across an expansion node, then we interrupt the traversal
591
 * and call collect_filter_prefix_expansion to restart the traversal
592
 * over the remaining ancestors and to combine the results with those
593
 * that have already been collected.
594
 * If we come across an extension node and we are only computing
595
 * the universe domain, then we interrupt the traversal and call
596
 * collect_universe_domain_extension to restart the traversal
597
 * over the remaining ancestors and to combine the results with those
598
 * that have already been collected.
599
 * On successful return, data->initialized will be set since the outermost
600
 * ancestor is a domain node, which always results in an initialization.
601
 */
602
static int collect_filter_prefix(__isl_keep isl_schedule_tree_list *list,
603
  int n, struct isl_schedule_node_get_filter_prefix_data *data)
604
7.63k
{
605
7.63k
  int i;
606
7.63k
607
7.63k
  if (!list)
608
0
    return -1;
609
7.63k
610
33.9k
  
for (i = n - 1; 7.63k
i >= 0;
--i26.2k
) {
611
26.3k
    isl_schedule_tree *tree;
612
26.3k
    enum isl_schedule_node_type type;
613
26.3k
    int r;
614
26.3k
615
26.3k
    tree = isl_schedule_tree_list_get_schedule_tree(list, i);
616
26.3k
    if (!tree)
617
0
      return -1;
618
26.3k
    type = isl_schedule_tree_get_type(tree);
619
26.3k
    if (type == isl_schedule_node_expansion)
620
6
      return collect_filter_prefix_expansion(tree, list, i,
621
6
                data);
622
26.2k
    if (type == isl_schedule_node_extension &&
623
26.2k
        
data->universe_domain24
&&
!data->collect_prefix12
)
624
12
      return collect_universe_domain_extension(tree, list, i,
625
12
                data);
626
26.2k
    if (!data->initialized)
627
7.63k
      r = collect_filter_prefix_init(tree, data);
628
18.6k
    else
629
18.6k
      r = collect_filter_prefix_update(tree, data);
630
26.2k
    isl_schedule_tree_free(tree);
631
26.2k
    if (r < 0)
632
0
      return -1;
633
26.3k
  }
634
7.63k
635
7.63k
  
return 07.61k
;
636
7.63k
}
637
638
/* Return the concatenation of the partial schedules of all outer band
639
 * nodes of "node" interesected with all outer filters
640
 * as an isl_multi_union_pw_aff.
641
 * None of the ancestors of "node" may be an extension node, unless
642
 * there is also a filter ancestor that filters out all the extended
643
 * domain elements.
644
 *
645
 * If "node" is pointing at the root of the schedule tree, then
646
 * there are no domain elements reaching the current node, so
647
 * we return an empty result.
648
 *
649
 * We collect all the filters and partial schedules in collect_filter_prefix
650
 * and intersect the domain of the combined schedule with the combined filter.
651
 */
652
__isl_give isl_multi_union_pw_aff *
653
isl_schedule_node_get_prefix_schedule_multi_union_pw_aff(
654
  __isl_keep isl_schedule_node *node)
655
3
{
656
3
  int n;
657
3
  isl_space *space;
658
3
  struct isl_schedule_node_get_filter_prefix_data data;
659
3
660
3
  if (!node)
661
0
    return NULL;
662
3
663
3
  space = isl_schedule_get_space(node->schedule);
664
3
  space = isl_space_set_from_params(space);
665
3
  if (node->tree == node->schedule->root)
666
0
    return isl_multi_union_pw_aff_zero(space);
667
3
668
3
  data.initialized = 0;
669
3
  data.universe_domain = 1;
670
3
  data.universe_filter = 0;
671
3
  data.collect_prefix = 1;
672
3
  data.filter = NULL;
673
3
  data.prefix = isl_multi_union_pw_aff_zero(space);
674
3
675
3
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
676
3
  if (collect_filter_prefix(node->ancestors, n, &data) < 0)
677
0
    data.prefix = isl_multi_union_pw_aff_free(data.prefix);
678
3
679
3
  data.prefix = isl_multi_union_pw_aff_intersect_domain(data.prefix,
680
3
                data.filter);
681
3
682
3
  return data.prefix;
683
3
}
684
685
/* Return the concatenation of the partial schedules of all outer band
686
 * nodes of "node" interesected with all outer filters
687
 * as an isl_union_pw_multi_aff.
688
 * None of the ancestors of "node" may be an extension node, unless
689
 * there is also a filter ancestor that filters out all the extended
690
 * domain elements.
691
 *
692
 * If "node" is pointing at the root of the schedule tree, then
693
 * there are no domain elements reaching the current node, so
694
 * we return an empty result.
695
 *
696
 * We collect all the filters and partial schedules in collect_filter_prefix.
697
 * The partial schedules are collected as an isl_multi_union_pw_aff.
698
 * If this isl_multi_union_pw_aff is zero-dimensional, then it does not
699
 * contain any domain information, so we construct the isl_union_pw_multi_aff
700
 * result as a zero-dimensional function on the collected filter.
701
 * Otherwise, we convert the isl_multi_union_pw_aff to
702
 * an isl_multi_union_pw_aff and intersect the domain with the filter.
703
 */
704
__isl_give isl_union_pw_multi_aff *
705
isl_schedule_node_get_prefix_schedule_union_pw_multi_aff(
706
  __isl_keep isl_schedule_node *node)
707
16
{
708
16
  int n;
709
16
  isl_space *space;
710
16
  isl_union_pw_multi_aff *prefix;
711
16
  struct isl_schedule_node_get_filter_prefix_data data;
712
16
713
16
  if (!node)
714
0
    return NULL;
715
16
716
16
  space = isl_schedule_get_space(node->schedule);
717
16
  if (node->tree == node->schedule->root)
718
0
    return isl_union_pw_multi_aff_empty(space);
719
16
720
16
  space = isl_space_set_from_params(space);
721
16
  data.initialized = 0;
722
16
  data.universe_domain = 1;
723
16
  data.universe_filter = 0;
724
16
  data.collect_prefix = 1;
725
16
  data.filter = NULL;
726
16
  data.prefix = isl_multi_union_pw_aff_zero(space);
727
16
728
16
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
729
16
  if (collect_filter_prefix(node->ancestors, n, &data) < 0)
730
0
    data.prefix = isl_multi_union_pw_aff_free(data.prefix);
731
16
732
16
  if (data.prefix &&
733
16
      isl_multi_union_pw_aff_dim(data.prefix, isl_dim_set) == 0) {
734
0
    isl_multi_union_pw_aff_free(data.prefix);
735
0
    prefix = isl_union_pw_multi_aff_from_domain(data.filter);
736
16
  } else {
737
16
    prefix =
738
16
        isl_union_pw_multi_aff_from_multi_union_pw_aff(data.prefix);
739
16
    prefix = isl_union_pw_multi_aff_intersect_domain(prefix,
740
16
                data.filter);
741
16
  }
742
16
743
16
  return prefix;
744
16
}
745
746
/* Return the concatenation of the partial schedules of all outer band
747
 * nodes of "node" interesected with all outer filters
748
 * as an isl_union_map.
749
 */
750
__isl_give isl_union_map *isl_schedule_node_get_prefix_schedule_union_map(
751
  __isl_keep isl_schedule_node *node)
752
14
{
753
14
  isl_union_pw_multi_aff *upma;
754
14
755
14
  upma = isl_schedule_node_get_prefix_schedule_union_pw_multi_aff(node);
756
14
  return isl_union_map_from_union_pw_multi_aff(upma);
757
14
}
758
759
/* Return the concatenation of the partial schedules of all outer band
760
 * nodes of "node" intersected with all outer domain constraints.
761
 * None of the ancestors of "node" may be an extension node, unless
762
 * there is also a filter ancestor that filters out all the extended
763
 * domain elements.
764
 *
765
 * Essentially, this function intersects the domain of the output
766
 * of isl_schedule_node_get_prefix_schedule_union_map with the output
767
 * of isl_schedule_node_get_domain, except that it only traverses
768
 * the ancestors of "node" once.
769
 */
770
__isl_give isl_union_map *isl_schedule_node_get_prefix_schedule_relation(
771
  __isl_keep isl_schedule_node *node)
772
3.75k
{
773
3.75k
  int n;
774
3.75k
  isl_space *space;
775
3.75k
  isl_union_map *prefix;
776
3.75k
  struct isl_schedule_node_get_filter_prefix_data data;
777
3.75k
778
3.75k
  if (!node)
779
0
    return NULL;
780
3.75k
781
3.75k
  space = isl_schedule_get_space(node->schedule);
782
3.75k
  if (node->tree == node->schedule->root)
783
0
    return isl_union_map_empty(space);
784
3.75k
785
3.75k
  space = isl_space_set_from_params(space);
786
3.75k
  data.initialized = 0;
787
3.75k
  data.universe_domain = 0;
788
3.75k
  data.universe_filter = 0;
789
3.75k
  data.collect_prefix = 1;
790
3.75k
  data.filter = NULL;
791
3.75k
  data.prefix = isl_multi_union_pw_aff_zero(space);
792
3.75k
793
3.75k
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
794
3.75k
  if (collect_filter_prefix(node->ancestors, n, &data) < 0)
795
0
    data.prefix = isl_multi_union_pw_aff_free(data.prefix);
796
3.75k
797
3.75k
  if (data.prefix &&
798
3.75k
      isl_multi_union_pw_aff_dim(data.prefix, isl_dim_set) == 0) {
799
396
    isl_multi_union_pw_aff_free(data.prefix);
800
396
    prefix = isl_union_map_from_domain(data.filter);
801
3.36k
  } else {
802
3.36k
    prefix = isl_union_map_from_multi_union_pw_aff(data.prefix);
803
3.36k
    prefix = isl_union_map_intersect_domain(prefix, data.filter);
804
3.36k
  }
805
3.75k
806
3.75k
  return prefix;
807
3.75k
}
808
809
/* Return the domain elements that reach "node".
810
 *
811
 * If "node" is pointing at the root of the schedule tree, then
812
 * there are no domain elements reaching the current node, so
813
 * we return an empty result.
814
 * None of the ancestors of "node" may be an extension node, unless
815
 * there is also a filter ancestor that filters out all the extended
816
 * domain elements.
817
 *
818
 * Otherwise, we collect all filters reaching the node,
819
 * intersected with the root domain in collect_filter_prefix.
820
 */
821
__isl_give isl_union_set *isl_schedule_node_get_domain(
822
  __isl_keep isl_schedule_node *node)
823
32
{
824
32
  int n;
825
32
  struct isl_schedule_node_get_filter_prefix_data data;
826
32
827
32
  if (!node)
828
0
    return NULL;
829
32
830
32
  if (node->tree == node->schedule->root) {
831
0
    isl_space *space;
832
0
833
0
    space = isl_schedule_get_space(node->schedule);
834
0
    return isl_union_set_empty(space);
835
0
  }
836
32
837
32
  data.initialized = 0;
838
32
  data.universe_domain = 0;
839
32
  data.universe_filter = 0;
840
32
  data.collect_prefix = 0;
841
32
  data.filter = NULL;
842
32
  data.prefix = NULL;
843
32
844
32
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
845
32
  if (collect_filter_prefix(node->ancestors, n, &data) < 0)
846
0
    data.filter = isl_union_set_free(data.filter);
847
32
848
32
  return data.filter;
849
32
}
850
851
/* Return the union of universe sets of the domain elements that reach "node".
852
 *
853
 * If "node" is pointing at the root of the schedule tree, then
854
 * there are no domain elements reaching the current node, so
855
 * we return an empty result.
856
 *
857
 * Otherwise, we collect the universes of all filters reaching the node
858
 * in collect_filter_prefix.
859
 */
860
__isl_give isl_union_set *isl_schedule_node_get_universe_domain(
861
  __isl_keep isl_schedule_node *node)
862
3.80k
{
863
3.80k
  int n;
864
3.80k
  struct isl_schedule_node_get_filter_prefix_data data;
865
3.80k
866
3.80k
  if (!node)
867
0
    return NULL;
868
3.80k
869
3.80k
  if (node->tree == node->schedule->root) {
870
0
    isl_space *space;
871
0
872
0
    space = isl_schedule_get_space(node->schedule);
873
0
    return isl_union_set_empty(space);
874
0
  }
875
3.80k
876
3.80k
  data.initialized = 0;
877
3.80k
  data.universe_domain = 1;
878
3.80k
  data.universe_filter = 1;
879
3.80k
  data.collect_prefix = 0;
880
3.80k
  data.filter = NULL;
881
3.80k
  data.prefix = NULL;
882
3.80k
883
3.80k
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
884
3.80k
  if (collect_filter_prefix(node->ancestors, n, &data) < 0)
885
0
    data.filter = isl_union_set_free(data.filter);
886
3.80k
887
3.80k
  return data.filter;
888
3.80k
}
889
890
/* Return the subtree schedule of "node".
891
 *
892
 * Since isl_schedule_tree_get_subtree_schedule_union_map does not handle
893
 * trees that do not contain any schedule information, we first
894
 * move down to the first relevant descendant and handle leaves ourselves.
895
 *
896
 * If the subtree rooted at "node" contains any expansion nodes, then
897
 * the returned subtree schedule is formulated in terms of the expanded
898
 * domains.
899
 * The subtree is not allowed to contain any extension nodes.
900
 */
901
__isl_give isl_union_map *isl_schedule_node_get_subtree_schedule_union_map(
902
  __isl_keep isl_schedule_node *node)
903
1.48k
{
904
1.48k
  isl_schedule_tree *tree, *leaf;
905
1.48k
  isl_union_map *umap;
906
1.48k
907
1.48k
  tree = isl_schedule_node_get_tree(node);
908
1.48k
  leaf = isl_schedule_node_peek_leaf(node);
909
1.48k
  tree = isl_schedule_tree_first_schedule_descendant(tree, leaf);
910
1.48k
  if (!tree)
911
0
    return NULL;
912
1.48k
  if (tree == leaf) {
913
25
    isl_union_set *domain;
914
25
    domain = isl_schedule_node_get_universe_domain(node);
915
25
    isl_schedule_tree_free(tree);
916
25
    return isl_union_map_from_domain(domain);
917
25
  }
918
1.46k
919
1.46k
  umap = isl_schedule_tree_get_subtree_schedule_union_map(tree);
920
1.46k
  isl_schedule_tree_free(tree);
921
1.46k
  return umap;
922
1.46k
}
923
924
/* Return the number of ancestors of "node" in its schedule tree.
925
 */
926
int isl_schedule_node_get_tree_depth(__isl_keep isl_schedule_node *node)
927
458k
{
928
458k
  if (!node)
929
0
    return -1;
930
458k
  return isl_schedule_tree_list_n_schedule_tree(node->ancestors);
931
458k
}
932
933
/* Does "node" have a parent?
934
 *
935
 * That is, does it point to any node of the schedule other than the root?
936
 */
937
isl_bool isl_schedule_node_has_parent(__isl_keep isl_schedule_node *node)
938
86.1k
{
939
86.1k
  if (!node)
940
0
    return isl_bool_error;
941
86.1k
  if (!node->ancestors)
942
0
    return isl_bool_error;
943
86.1k
944
86.1k
  return isl_schedule_tree_list_n_schedule_tree(node->ancestors) != 0;
945
86.1k
}
946
947
/* Return the position of "node" among the children of its parent.
948
 */
949
int isl_schedule_node_get_child_position(__isl_keep isl_schedule_node *node)
950
0
{
951
0
  int n;
952
0
  int has_parent;
953
0
954
0
  if (!node)
955
0
    return -1;
956
0
  has_parent = isl_schedule_node_has_parent(node);
957
0
  if (has_parent < 0)
958
0
    return -1;
959
0
  if (!has_parent)
960
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
961
0
      "node has no parent", return -1);
962
0
963
0
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
964
0
  return node->child_pos[n - 1];
965
0
}
966
967
/* Does the parent (if any) of "node" have any children with a smaller child
968
 * position than this one?
969
 */
970
isl_bool isl_schedule_node_has_previous_sibling(
971
  __isl_keep isl_schedule_node *node)
972
0
{
973
0
  int n;
974
0
  isl_bool has_parent;
975
0
976
0
  if (!node)
977
0
    return isl_bool_error;
978
0
  has_parent = isl_schedule_node_has_parent(node);
979
0
  if (has_parent < 0 || !has_parent)
980
0
    return has_parent;
981
0
982
0
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
983
0
984
0
  return node->child_pos[n - 1] > 0;
985
0
}
986
987
/* Does the parent (if any) of "node" have any children with a greater child
988
 * position than this one?
989
 */
990
isl_bool isl_schedule_node_has_next_sibling(__isl_keep isl_schedule_node *node)
991
47.4k
{
992
47.4k
  int n, n_child;
993
47.4k
  isl_bool has_parent;
994
47.4k
  isl_schedule_tree *tree;
995
47.4k
996
47.4k
  if (!node)
997
0
    return isl_bool_error;
998
47.4k
  has_parent = isl_schedule_node_has_parent(node);
999
47.4k
  if (has_parent < 0 || !has_parent)
1000
0
    return has_parent;
1001
47.4k
1002
47.4k
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
1003
47.4k
  tree = isl_schedule_tree_list_get_schedule_tree(node->ancestors, n - 1);
1004
47.4k
  if (!tree)
1005
0
    return isl_bool_error;
1006
47.4k
  n_child = isl_schedule_tree_list_n_schedule_tree(tree->children);
1007
47.4k
  isl_schedule_tree_free(tree);
1008
47.4k
1009
47.4k
  return node->child_pos[n - 1] + 1 < n_child;
1010
47.4k
}
1011
1012
/* Does "node" have any children?
1013
 *
1014
 * Any node other than the leaf nodes is considered to have at least
1015
 * one child, even if the corresponding isl_schedule_tree does not
1016
 * have any children.
1017
 */
1018
isl_bool isl_schedule_node_has_children(__isl_keep isl_schedule_node *node)
1019
78.7k
{
1020
78.7k
  if (!node)
1021
0
    return isl_bool_error;
1022
78.7k
  return !isl_schedule_tree_is_leaf(node->tree);
1023
78.7k
}
1024
1025
/* Return the number of children of "node"?
1026
 *
1027
 * Any node other than the leaf nodes is considered to have at least
1028
 * one child, even if the corresponding isl_schedule_tree does not
1029
 * have any children.  That is, the number of children of "node" is
1030
 * only zero if its tree is the explicit empty tree.  Otherwise,
1031
 * if the isl_schedule_tree has any children, then it is equal
1032
 * to the number of children of "node".  If it has zero children,
1033
 * then "node" still has a leaf node as child.
1034
 */
1035
int isl_schedule_node_n_children(__isl_keep isl_schedule_node *node)
1036
705
{
1037
705
  int n;
1038
705
1039
705
  if (!node)
1040
0
    return -1;
1041
705
1042
705
  if (isl_schedule_tree_is_leaf(node->tree))
1043
0
    return 0;
1044
705
1045
705
  n = isl_schedule_tree_n_children(node->tree);
1046
705
  if (n == 0)
1047
173
    return 1;
1048
532
1049
532
  return n;
1050
532
}
1051
1052
/* Move the "node" pointer to the ancestor of the given generation
1053
 * of the node it currently points to, where generation 0 is the node
1054
 * itself and generation 1 is its parent.
1055
 */
1056
__isl_give isl_schedule_node *isl_schedule_node_ancestor(
1057
  __isl_take isl_schedule_node *node, int generation)
1058
64.3k
{
1059
64.3k
  int n;
1060
64.3k
  isl_schedule_tree *tree;
1061
64.3k
1062
64.3k
  if (!node)
1063
0
    return NULL;
1064
64.3k
  if (generation == 0)
1065
0
    return node;
1066
64.3k
  n = isl_schedule_node_get_tree_depth(node);
1067
64.3k
  if (n < 0)
1068
0
    return isl_schedule_node_free(node);
1069
64.3k
  if (generation < 0 || generation > n)
1070
64.3k
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
1071
64.3k
      "generation out of bounds",
1072
64.3k
      return isl_schedule_node_free(node));
1073
64.3k
  node = isl_schedule_node_cow(node);
1074
64.3k
  if (!node)
1075
0
    return NULL;
1076
64.3k
1077
64.3k
  tree = isl_schedule_tree_list_get_schedule_tree(node->ancestors,
1078
64.3k
              n - generation);
1079
64.3k
  isl_schedule_tree_free(node->tree);
1080
64.3k
  node->tree = tree;
1081
64.3k
  node->ancestors = isl_schedule_tree_list_drop(node->ancestors,
1082
64.3k
                n - generation, generation);
1083
64.3k
  if (!node->ancestors || !node->tree)
1084
0
    return isl_schedule_node_free(node);
1085
64.3k
1086
64.3k
  return node;
1087
64.3k
}
1088
1089
/* Move the "node" pointer to the parent of the node it currently points to.
1090
 */
1091
__isl_give isl_schedule_node *isl_schedule_node_parent(
1092
  __isl_take isl_schedule_node *node)
1093
33.1k
{
1094
33.1k
  if (!node)
1095
0
    return NULL;
1096
33.1k
  if (!isl_schedule_node_has_parent(node))
1097
33.1k
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
1098
33.1k
      "node has no parent",
1099
33.1k
      return isl_schedule_node_free(node));
1100
33.1k
  return isl_schedule_node_ancestor(node, 1);
1101
33.1k
}
1102
1103
/* Move the "node" pointer to the root of its schedule tree.
1104
 */
1105
__isl_give isl_schedule_node *isl_schedule_node_root(
1106
  __isl_take isl_schedule_node *node)
1107
1
{
1108
1
  int n;
1109
1
1110
1
  if (!node)
1111
0
    return NULL;
1112
1
  n = isl_schedule_node_get_tree_depth(node);
1113
1
  if (n < 0)
1114
0
    return isl_schedule_node_free(node);
1115
1
  return isl_schedule_node_ancestor(node, n);
1116
1
}
1117
1118
/* Move the "node" pointer to the child at position "pos" of the node
1119
 * it currently points to.
1120
 */
1121
__isl_give isl_schedule_node *isl_schedule_node_child(
1122
  __isl_take isl_schedule_node *node, int pos)
1123
40.1k
{
1124
40.1k
  int n;
1125
40.1k
  isl_ctx *ctx;
1126
40.1k
  isl_schedule_tree *tree;
1127
40.1k
  int *child_pos;
1128
40.1k
1129
40.1k
  node = isl_schedule_node_cow(node);
1130
40.1k
  if (!node)
1131
0
    return NULL;
1132
40.1k
  if (!isl_schedule_node_has_children(node))
1133
40.1k
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
1134
40.1k
      "node has no children",
1135
40.1k
      return isl_schedule_node_free(node));
1136
40.1k
1137
40.1k
  ctx = isl_schedule_node_get_ctx(node);
1138
40.1k
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
1139
40.1k
  child_pos = isl_realloc_array(ctx, node->child_pos, int, n + 1);
1140
40.1k
  if (!child_pos)
1141
0
    return isl_schedule_node_free(node);
1142
40.1k
  node->child_pos = child_pos;
1143
40.1k
  node->child_pos[n] = pos;
1144
40.1k
1145
40.1k
  node->ancestors = isl_schedule_tree_list_add(node->ancestors,
1146
40.1k
        isl_schedule_tree_copy(node->tree));
1147
40.1k
  tree = node->tree;
1148
40.1k
  if (isl_schedule_tree_has_children(tree))
1149
22.0k
    tree = isl_schedule_tree_get_child(tree, pos);
1150
18.1k
  else
1151
18.1k
    tree = isl_schedule_node_get_leaf(node);
1152
40.1k
  isl_schedule_tree_free(node->tree);
1153
40.1k
  node->tree = tree;
1154
40.1k
1155
40.1k
  if (!node->tree || !node->ancestors)
1156
0
    return isl_schedule_node_free(node);
1157
40.1k
1158
40.1k
  return node;
1159
40.1k
}
1160
1161
/* Move the "node" pointer to the first child of the node
1162
 * it currently points to.
1163
 */
1164
__isl_give isl_schedule_node *isl_schedule_node_first_child(
1165
  __isl_take isl_schedule_node *node)
1166
31.7k
{
1167
31.7k
  return isl_schedule_node_child(node, 0);
1168
31.7k
}
1169
1170
/* Move the "node" pointer to the child of this node's parent in
1171
 * the previous child position.
1172
 */
1173
__isl_give isl_schedule_node *isl_schedule_node_previous_sibling(
1174
  __isl_take isl_schedule_node *node)
1175
0
{
1176
0
  int n;
1177
0
  isl_schedule_tree *parent, *tree;
1178
0
1179
0
  node = isl_schedule_node_cow(node);
1180
0
  if (!node)
1181
0
    return NULL;
1182
0
  if (!isl_schedule_node_has_previous_sibling(node))
1183
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
1184
0
      "node has no previous sibling",
1185
0
      return isl_schedule_node_free(node));
1186
0
1187
0
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
1188
0
  parent = isl_schedule_tree_list_get_schedule_tree(node->ancestors,
1189
0
                  n - 1);
1190
0
  if (!parent)
1191
0
    return isl_schedule_node_free(node);
1192
0
  node->child_pos[n - 1]--;
1193
0
  tree = isl_schedule_tree_list_get_schedule_tree(parent->children,
1194
0
              node->child_pos[n - 1]);
1195
0
  isl_schedule_tree_free(parent);
1196
0
  if (!tree)
1197
0
    return isl_schedule_node_free(node);
1198
0
  isl_schedule_tree_free(node->tree);
1199
0
  node->tree = tree;
1200
0
1201
0
  return node;
1202
0
}
1203
1204
/* Move the "node" pointer to the child of this node's parent in
1205
 * the next child position.
1206
 */
1207
__isl_give isl_schedule_node *isl_schedule_node_next_sibling(
1208
  __isl_take isl_schedule_node *node)
1209
7.71k
{
1210
7.71k
  int n;
1211
7.71k
  isl_schedule_tree *parent, *tree;
1212
7.71k
1213
7.71k
  node = isl_schedule_node_cow(node);
1214
7.71k
  if (!node)
1215
0
    return NULL;
1216
7.71k
  if (!isl_schedule_node_has_next_sibling(node))
1217
7.71k
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
1218
7.71k
      "node has no next sibling",
1219
7.71k
      return isl_schedule_node_free(node));
1220
7.71k
1221
7.71k
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
1222
7.71k
  parent = isl_schedule_tree_list_get_schedule_tree(node->ancestors,
1223
7.71k
                  n - 1);
1224
7.71k
  if (!parent)
1225
0
    return isl_schedule_node_free(node);
1226
7.71k
  node->child_pos[n - 1]++;
1227
7.71k
  tree = isl_schedule_tree_list_get_schedule_tree(parent->children,
1228
7.71k
              node->child_pos[n - 1]);
1229
7.71k
  isl_schedule_tree_free(parent);
1230
7.71k
  if (!tree)
1231
0
    return isl_schedule_node_free(node);
1232
7.71k
  isl_schedule_tree_free(node->tree);
1233
7.71k
  node->tree = tree;
1234
7.71k
1235
7.71k
  return node;
1236
7.71k
}
1237
1238
/* Return a copy to the child at position "pos" of "node".
1239
 */
1240
__isl_give isl_schedule_node *isl_schedule_node_get_child(
1241
  __isl_keep isl_schedule_node *node, int pos)
1242
487
{
1243
487
  return isl_schedule_node_child(isl_schedule_node_copy(node), pos);
1244
487
}
1245
1246
/* Traverse the descendant of "node" in depth-first order, including
1247
 * "node" itself.  Call "enter" whenever a node is entered and "leave"
1248
 * whenever a node is left.  The callback "enter" is responsible
1249
 * for moving to the deepest initial subtree of its argument that
1250
 * should be traversed.
1251
 */
1252
static __isl_give isl_schedule_node *traverse(
1253
  __isl_take isl_schedule_node *node,
1254
  __isl_give isl_schedule_node *(*enter)(
1255
    __isl_take isl_schedule_node *node, void *user),
1256
  __isl_give isl_schedule_node *(*leave)(
1257
    __isl_take isl_schedule_node *node, void *user),
1258
  void *user)
1259
6.96k
{
1260
6.96k
  int depth;
1261
6.96k
1262
6.96k
  if (!node)
1263
0
    return NULL;
1264
6.96k
1265
6.96k
  depth = isl_schedule_node_get_tree_depth(node);
1266
14.6k
  do {
1267
14.6k
    node = enter(node, user);
1268
14.6k
    node = leave(node, user);
1269
46.7k
    while (node && 
isl_schedule_node_get_tree_depth(node) > depth46.6k
&&
1270
46.7k
        
!isl_schedule_node_has_next_sibling(node)39.7k
) {
1271
32.0k
      node = isl_schedule_node_parent(node);
1272
32.0k
      node = leave(node, user);
1273
32.0k
    }
1274
14.6k
    if (node && 
isl_schedule_node_get_tree_depth(node) > depth14.6k
)
1275
7.71k
      node = isl_schedule_node_next_sibling(node);
1276
14.6k
  } while (node && 
isl_schedule_node_get_tree_depth(node) > depth14.6k
);
1277
6.96k
1278
6.96k
  return node;
1279
6.96k
}
1280
1281
/* Internal data structure for isl_schedule_node_foreach_descendant_top_down.
1282
 *
1283
 * "fn" is the user-specified callback function.
1284
 * "user" is the user-specified argument for the callback.
1285
 */
1286
struct isl_schedule_node_preorder_data {
1287
  isl_bool (*fn)(__isl_keep isl_schedule_node *node, void *user);
1288
  void *user;
1289
};
1290
1291
/* Callback for "traverse" to enter a node and to move
1292
 * to the deepest initial subtree that should be traversed
1293
 * for use in a preorder visit.
1294
 *
1295
 * If the user callback returns a negative value, then we abort
1296
 * the traversal.  If this callback returns zero, then we skip
1297
 * the subtree rooted at the current node.  Otherwise, we move
1298
 * down to the first child and repeat the process until a leaf
1299
 * is reached.
1300
 */
1301
static __isl_give isl_schedule_node *preorder_enter(
1302
  __isl_take isl_schedule_node *node, void *user)
1303
13.2k
{
1304
13.2k
  struct isl_schedule_node_preorder_data *data = user;
1305
13.2k
1306
13.2k
  if (!node)
1307
0
    return NULL;
1308
13.2k
1309
43.1k
  
do 13.2k
{
1310
43.1k
    isl_bool r;
1311
43.1k
1312
43.1k
    r = data->fn(node, data->user);
1313
43.1k
    if (r < 0)
1314
16
      return isl_schedule_node_free(node);
1315
43.1k
    if (r == isl_bool_false)
1316
7.46k
      return node;
1317
35.6k
  } while (isl_schedule_node_has_children(node) &&
1318
35.6k
    
(node = isl_schedule_node_first_child(node)) != NULL29.8k
);
1319
13.2k
1320
13.2k
  
return node5.78k
;
1321
13.2k
}
1322
1323
/* Callback for "traverse" to leave a node
1324
 * for use in a preorder visit.
1325
 * Since we already visited the node when we entered it,
1326
 * we do not need to do anything here.
1327
 */
1328
static __isl_give isl_schedule_node *preorder_leave(
1329
  __isl_take isl_schedule_node *node, void *user)
1330
43.0k
{
1331
43.0k
  return node;
1332
43.0k
}
1333
1334
/* Traverse the descendants of "node" (including the node itself)
1335
 * in depth first preorder.
1336
 *
1337
 * If "fn" returns -1 on any of the nodes, then the traversal is aborted.
1338
 * If "fn" returns 0 on any of the nodes, then the subtree rooted
1339
 * at that node is skipped.
1340
 *
1341
 * Return 0 on success and -1 on failure.
1342
 */
1343
isl_stat isl_schedule_node_foreach_descendant_top_down(
1344
  __isl_keep isl_schedule_node *node,
1345
  isl_bool (*fn)(__isl_keep isl_schedule_node *node, void *user),
1346
  void *user)
1347
6.41k
{
1348
6.41k
  struct isl_schedule_node_preorder_data data = { fn, user };
1349
6.41k
1350
6.41k
  node = isl_schedule_node_copy(node);
1351
6.41k
  node = traverse(node, &preorder_enter, &preorder_leave, &data);
1352
6.41k
  isl_schedule_node_free(node);
1353
6.41k
1354
6.41k
  return node ? 
isl_stat_ok6.40k
:
isl_stat_error16
;
1355
6.41k
}
1356
1357
/* Internal data structure for isl_schedule_node_map_descendant_bottom_up.
1358
 *
1359
 * "fn" is the user-specified callback function.
1360
 * "user" is the user-specified argument for the callback.
1361
 */
1362
struct isl_schedule_node_postorder_data {
1363
  __isl_give isl_schedule_node *(*fn)(__isl_take isl_schedule_node *node,
1364
    void *user);
1365
  void *user;
1366
};
1367
1368
/* Callback for "traverse" to enter a node and to move
1369
 * to the deepest initial subtree that should be traversed
1370
 * for use in a postorder visit.
1371
 *
1372
 * Since we are performing a postorder visit, we only need
1373
 * to move to the deepest initial leaf here.
1374
 */
1375
static __isl_give isl_schedule_node *postorder_enter(
1376
  __isl_take isl_schedule_node *node, void *user)
1377
344
{
1378
1.24k
  while (node && isl_schedule_node_has_children(node))
1379
904
    node = isl_schedule_node_first_child(node);
1380
344
1381
344
  return node;
1382
344
}
1383
1384
/* Callback for "traverse" to leave a node
1385
 * for use in a postorder visit.
1386
 *
1387
 * Since we are performing a postorder visit, we need
1388
 * to call the user callback here.
1389
 */
1390
static __isl_give isl_schedule_node *postorder_leave(
1391
  __isl_take isl_schedule_node *node, void *user)
1392
1.56k
{
1393
1.56k
  struct isl_schedule_node_postorder_data *data = user;
1394
1.56k
1395
1.56k
  return data->fn(node, data->user);
1396
1.56k
}
1397
1398
/* Traverse the descendants of "node" (including the node itself)
1399
 * in depth first postorder, allowing the user to modify the visited node.
1400
 * The traversal continues from the node returned by the callback function.
1401
 * It is the responsibility of the user to ensure that this does not
1402
 * lead to an infinite loop.  It is safest to always return a pointer
1403
 * to the same position (same ancestors and child positions) as the input node.
1404
 */
1405
__isl_give isl_schedule_node *isl_schedule_node_map_descendant_bottom_up(
1406
  __isl_take isl_schedule_node *node,
1407
  __isl_give isl_schedule_node *(*fn)(__isl_take isl_schedule_node *node,
1408
    void *user), void *user)
1409
185
{
1410
185
  struct isl_schedule_node_postorder_data data = { fn, user };
1411
185
1412
185
  return traverse(node, &postorder_enter, &postorder_leave, &data);
1413
185
}
1414
1415
/* Traverse the ancestors of "node" from the root down to and including
1416
 * the parent of "node", calling "fn" on each of them.
1417
 *
1418
 * If "fn" returns -1 on any of the nodes, then the traversal is aborted.
1419
 *
1420
 * Return 0 on success and -1 on failure.
1421
 */
1422
isl_stat isl_schedule_node_foreach_ancestor_top_down(
1423
  __isl_keep isl_schedule_node *node,
1424
  isl_stat (*fn)(__isl_keep isl_schedule_node *node, void *user),
1425
  void *user)
1426
0
{
1427
0
  int i, n;
1428
0
1429
0
  if (!node)
1430
0
    return isl_stat_error;
1431
0
1432
0
  n = isl_schedule_node_get_tree_depth(node);
1433
0
  for (i = 0; i < n; ++i) {
1434
0
    isl_schedule_node *ancestor;
1435
0
    isl_stat r;
1436
0
1437
0
    ancestor = isl_schedule_node_copy(node);
1438
0
    ancestor = isl_schedule_node_ancestor(ancestor, n - i);
1439
0
    r = fn(ancestor, user);
1440
0
    isl_schedule_node_free(ancestor);
1441
0
    if (r < 0)
1442
0
      return isl_stat_error;
1443
0
  }
1444
0
1445
0
  return isl_stat_ok;
1446
0
}
1447
1448
/* Is any node in the subtree rooted at "node" anchored?
1449
 * That is, do any of these nodes reference the outer band nodes?
1450
 */
1451
isl_bool isl_schedule_node_is_subtree_anchored(
1452
  __isl_keep isl_schedule_node *node)
1453
3.91k
{
1454
3.91k
  if (!node)
1455
0
    return isl_bool_error;
1456
3.91k
  return isl_schedule_tree_is_subtree_anchored(node->tree);
1457
3.91k
}
1458
1459
/* Return the number of members in the given band node.
1460
 */
1461
unsigned isl_schedule_node_band_n_member(__isl_keep isl_schedule_node *node)
1462
1.61k
{
1463
1.61k
  return node ? isl_schedule_tree_band_n_member(node->tree) : 
00
;
1464
1.61k
}
1465
1466
/* Is the band member at position "pos" of the band node "node"
1467
 * marked coincident?
1468
 */
1469
isl_bool isl_schedule_node_band_member_get_coincident(
1470
  __isl_keep isl_schedule_node *node, int pos)
1471
809
{
1472
809
  if (!node)
1473
0
    return isl_bool_error;
1474
809
  return isl_schedule_tree_band_member_get_coincident(node->tree, pos);
1475
809
}
1476
1477
/* Mark the band member at position "pos" the band node "node"
1478
 * as being coincident or not according to "coincident".
1479
 */
1480
__isl_give isl_schedule_node *isl_schedule_node_band_member_set_coincident(
1481
  __isl_take isl_schedule_node *node, int pos, int coincident)
1482
277
{
1483
277
  int c;
1484
277
  isl_schedule_tree *tree;
1485
277
1486
277
  if (!node)
1487
0
    return NULL;
1488
277
  c = isl_schedule_node_band_member_get_coincident(node, pos);
1489
277
  if (c == coincident)
1490
92
    return node;
1491
185
1492
185
  tree = isl_schedule_tree_copy(node->tree);
1493
185
  tree = isl_schedule_tree_band_member_set_coincident(tree, pos,
1494
185
                  coincident);
1495
185
  node = isl_schedule_node_graft_tree(node, tree);
1496
185
1497
185
  return node;
1498
185
}
1499
1500
/* Is the band node "node" marked permutable?
1501
 */
1502
isl_bool isl_schedule_node_band_get_permutable(
1503
  __isl_keep isl_schedule_node *node)
1504
646
{
1505
646
  if (!node)
1506
0
    return isl_bool_error;
1507
646
1508
646
  return isl_schedule_tree_band_get_permutable(node->tree);
1509
646
}
1510
1511
/* Mark the band node "node" permutable or not according to "permutable"?
1512
 */
1513
__isl_give isl_schedule_node *isl_schedule_node_band_set_permutable(
1514
  __isl_take isl_schedule_node *node, int permutable)
1515
171
{
1516
171
  isl_schedule_tree *tree;
1517
171
1518
171
  if (!node)
1519
0
    return NULL;
1520
171
  if (isl_schedule_node_band_get_permutable(node) == permutable)
1521
30
    return node;
1522
141
1523
141
  tree = isl_schedule_tree_copy(node->tree);
1524
141
  tree = isl_schedule_tree_band_set_permutable(tree, permutable);
1525
141
  node = isl_schedule_node_graft_tree(node, tree);
1526
141
1527
141
  return node;
1528
141
}
1529
1530
/* Return the schedule space of the band node.
1531
 */
1532
__isl_give isl_space *isl_schedule_node_band_get_space(
1533
  __isl_keep isl_schedule_node *node)
1534
298
{
1535
298
  if (!node)
1536
0
    return NULL;
1537
298
1538
298
  return isl_schedule_tree_band_get_space(node->tree);
1539
298
}
1540
1541
/* Return the schedule of the band node in isolation.
1542
 */
1543
__isl_give isl_multi_union_pw_aff *isl_schedule_node_band_get_partial_schedule(
1544
  __isl_keep isl_schedule_node *node)
1545
718
{
1546
718
  if (!node)
1547
0
    return NULL;
1548
718
1549
718
  return isl_schedule_tree_band_get_partial_schedule(node->tree);
1550
718
}
1551
1552
/* Return the schedule of the band node in isolation in the form of
1553
 * an isl_union_map.
1554
 *
1555
 * If the band does not have any members, then we construct a universe map
1556
 * with the universe of the domain elements reaching the node as domain.
1557
 * Otherwise, we extract an isl_multi_union_pw_aff representation and
1558
 * convert that to an isl_union_map.
1559
 */
1560
__isl_give isl_union_map *isl_schedule_node_band_get_partial_schedule_union_map(
1561
  __isl_keep isl_schedule_node *node)
1562
35
{
1563
35
  isl_multi_union_pw_aff *mupa;
1564
35
1565
35
  if (!node)
1566
0
    return NULL;
1567
35
1568
35
  if (isl_schedule_node_get_type(node) != isl_schedule_node_band)
1569
35
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
1570
35
      "not a band node", return NULL);
1571
35
  if (isl_schedule_node_band_n_member(node) == 0) {
1572
0
    isl_union_set *domain;
1573
0
1574
0
    domain = isl_schedule_node_get_universe_domain(node);
1575
0
    return isl_union_map_from_domain(domain);
1576
0
  }
1577
35
1578
35
  mupa = isl_schedule_node_band_get_partial_schedule(node);
1579
35
  return isl_union_map_from_multi_union_pw_aff(mupa);
1580
35
}
1581
1582
/* Return the loop AST generation type for the band member of band node "node"
1583
 * at position "pos".
1584
 */
1585
enum isl_ast_loop_type isl_schedule_node_band_member_get_ast_loop_type(
1586
  __isl_keep isl_schedule_node *node, int pos)
1587
790
{
1588
790
  if (!node)
1589
0
    return isl_ast_loop_error;
1590
790
1591
790
  return isl_schedule_tree_band_member_get_ast_loop_type(node->tree, pos);
1592
790
}
1593
1594
/* Set the loop AST generation type for the band member of band node "node"
1595
 * at position "pos" to "type".
1596
 */
1597
__isl_give isl_schedule_node *isl_schedule_node_band_member_set_ast_loop_type(
1598
  __isl_take isl_schedule_node *node, int pos,
1599
  enum isl_ast_loop_type type)
1600
0
{
1601
0
  isl_schedule_tree *tree;
1602
0
1603
0
  if (!node)
1604
0
    return NULL;
1605
0
1606
0
  tree = isl_schedule_tree_copy(node->tree);
1607
0
  tree = isl_schedule_tree_band_member_set_ast_loop_type(tree, pos, type);
1608
0
  return isl_schedule_node_graft_tree(node, tree);
1609
0
}
1610
1611
/* Return the loop AST generation type for the band member of band node "node"
1612
 * at position "pos" for the isolated part.
1613
 */
1614
enum isl_ast_loop_type isl_schedule_node_band_member_get_isolate_ast_loop_type(
1615
  __isl_keep isl_schedule_node *node, int pos)
1616
913
{
1617
913
  if (!node)
1618
0
    return isl_ast_loop_error;
1619
913
1620
913
  return isl_schedule_tree_band_member_get_isolate_ast_loop_type(
1621
913
                  node->tree, pos);
1622
913
}
1623
1624
/* Set the loop AST generation type for the band member of band node "node"
1625
 * at position "pos" for the isolated part to "type".
1626
 */
1627
__isl_give isl_schedule_node *
1628
isl_schedule_node_band_member_set_isolate_ast_loop_type(
1629
  __isl_take isl_schedule_node *node, int pos,
1630
  enum isl_ast_loop_type type)
1631
0
{
1632
0
  isl_schedule_tree *tree;
1633
0
1634
0
  if (!node)
1635
0
    return NULL;
1636
0
1637
0
  tree = isl_schedule_tree_copy(node->tree);
1638
0
  tree = isl_schedule_tree_band_member_set_isolate_ast_loop_type(tree,
1639
0
                    pos, type);
1640
0
  return isl_schedule_node_graft_tree(node, tree);
1641
0
}
1642
1643
/* Return the AST build options associated to band node "node".
1644
 */
1645
__isl_give isl_union_set *isl_schedule_node_band_get_ast_build_options(
1646
  __isl_keep isl_schedule_node *node)
1647
0
{
1648
0
  if (!node)
1649
0
    return NULL;
1650
0
1651
0
  return isl_schedule_tree_band_get_ast_build_options(node->tree);
1652
0
}
1653
1654
/* Replace the AST build options associated to band node "node" by "options".
1655
 */
1656
__isl_give isl_schedule_node *isl_schedule_node_band_set_ast_build_options(
1657
  __isl_take isl_schedule_node *node, __isl_take isl_union_set *options)
1658
70
{
1659
70
  isl_schedule_tree *tree;
1660
70
1661
70
  if (!node || !options)
1662
0
    goto error;
1663
70
1664
70
  tree = isl_schedule_tree_copy(node->tree);
1665
70
  tree = isl_schedule_tree_band_set_ast_build_options(tree, options);
1666
70
  return isl_schedule_node_graft_tree(node, tree);
1667
0
error:
1668
0
  isl_schedule_node_free(node);
1669
0
  isl_union_set_free(options);
1670
0
  return NULL;
1671
70
}
1672
1673
/* Return the "isolate" option associated to band node "node".
1674
 */
1675
__isl_give isl_set *isl_schedule_node_band_get_ast_isolate_option(
1676
  __isl_keep isl_schedule_node *node)
1677
2.31k
{
1678
2.31k
  int depth;
1679
2.31k
1680
2.31k
  if (!node)
1681
0
    return NULL;
1682
2.31k
1683
2.31k
  depth = isl_schedule_node_get_schedule_depth(node);
1684
2.31k
  return isl_schedule_tree_band_get_ast_isolate_option(node->tree, depth);
1685
2.31k
}
1686
1687
/* Make sure that that spaces of "node" and "mv" are the same.
1688
 * Return -1 on error, reporting the error to the user.
1689
 */
1690
static int check_space_multi_val(__isl_keep isl_schedule_node *node,
1691
  __isl_keep isl_multi_val *mv)
1692
76
{
1693
76
  isl_space *node_space, *mv_space;
1694
76
  int equal;
1695
76
1696
76
  node_space = isl_schedule_node_band_get_space(node);
1697
76
  mv_space = isl_multi_val_get_space(mv);
1698
76
  equal = isl_space_tuple_is_equal(node_space, isl_dim_set,
1699
76
          mv_space, isl_dim_set);
1700
76
  isl_space_free(mv_space);
1701
76
  isl_space_free(node_space);
1702
76
  if (equal < 0)
1703
0
    return -1;
1704
76
  if (!equal)
1705
76
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
1706
76
      "spaces don't match", return -1);
1707
76
1708
76
  return 0;
1709
76
}
1710
1711
/* Multiply the partial schedule of the band node "node"
1712
 * with the factors in "mv".
1713
 */
1714
__isl_give isl_schedule_node *isl_schedule_node_band_scale(
1715
  __isl_take isl_schedule_node *node, __isl_take isl_multi_val *mv)
1716
0
{
1717
0
  isl_schedule_tree *tree;
1718
0
  int anchored;
1719
0
1720
0
  if (!node || !mv)
1721
0
    goto error;
1722
0
  if (check_space_multi_val(node, mv) < 0)
1723
0
    goto error;
1724
0
  anchored = isl_schedule_node_is_subtree_anchored(node);
1725
0
  if (anchored < 0)
1726
0
    goto error;
1727
0
  if (anchored)
1728
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
1729
0
      "cannot scale band node with anchored subtree",
1730
0
      goto error);
1731
0
1732
0
  tree = isl_schedule_node_get_tree(node);
1733
0
  tree = isl_schedule_tree_band_scale(tree, mv);
1734
0
  return isl_schedule_node_graft_tree(node, tree);
1735
0
error:
1736
0
  isl_multi_val_free(mv);
1737
0
  isl_schedule_node_free(node);
1738
0
  return NULL;
1739
0
}
1740
1741
/* Divide the partial schedule of the band node "node"
1742
 * by the factors in "mv".
1743
 */
1744
__isl_give isl_schedule_node *isl_schedule_node_band_scale_down(
1745
  __isl_take isl_schedule_node *node, __isl_take isl_multi_val *mv)
1746
0
{
1747
0
  isl_schedule_tree *tree;
1748
0
  int anchored;
1749
0
1750
0
  if (!node || !mv)
1751
0
    goto error;
1752
0
  if (check_space_multi_val(node, mv) < 0)
1753
0
    goto error;
1754
0
  anchored = isl_schedule_node_is_subtree_anchored(node);
1755
0
  if (anchored < 0)
1756
0
    goto error;
1757
0
  if (anchored)
1758
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
1759
0
      "cannot scale down band node with anchored subtree",
1760
0
      goto error);
1761
0
1762
0
  tree = isl_schedule_node_get_tree(node);
1763
0
  tree = isl_schedule_tree_band_scale_down(tree, mv);
1764
0
  return isl_schedule_node_graft_tree(node, tree);
1765
0
error:
1766
0
  isl_multi_val_free(mv);
1767
0
  isl_schedule_node_free(node);
1768
0
  return NULL;
1769
0
}
1770
1771
/* Reduce the partial schedule of the band node "node"
1772
 * modulo the factors in "mv".
1773
 */
1774
__isl_give isl_schedule_node *isl_schedule_node_band_mod(
1775
  __isl_take isl_schedule_node *node, __isl_take isl_multi_val *mv)
1776
0
{
1777
0
  isl_schedule_tree *tree;
1778
0
  isl_bool anchored;
1779
0
1780
0
  if (!node || !mv)
1781
0
    goto error;
1782
0
  if (check_space_multi_val(node, mv) < 0)
1783
0
    goto error;
1784
0
  anchored = isl_schedule_node_is_subtree_anchored(node);
1785
0
  if (anchored < 0)
1786
0
    goto error;
1787
0
  if (anchored)
1788
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
1789
0
      "cannot perform mod on band node with anchored subtree",
1790
0
      goto error);
1791
0
1792
0
  tree = isl_schedule_node_get_tree(node);
1793
0
  tree = isl_schedule_tree_band_mod(tree, mv);
1794
0
  return isl_schedule_node_graft_tree(node, tree);
1795
0
error:
1796
0
  isl_multi_val_free(mv);
1797
0
  isl_schedule_node_free(node);
1798
0
  return NULL;
1799
0
}
1800
1801
/* Make sure that that spaces of "node" and "mupa" are the same.
1802
 * Return isl_stat_error on error, reporting the error to the user.
1803
 */
1804
static isl_stat check_space_multi_union_pw_aff(
1805
  __isl_keep isl_schedule_node *node,
1806
  __isl_keep isl_multi_union_pw_aff *mupa)
1807
0
{
1808
0
  isl_space *node_space, *mupa_space;
1809
0
  isl_bool equal;
1810
0
1811
0
  node_space = isl_schedule_node_band_get_space(node);
1812
0
  mupa_space = isl_multi_union_pw_aff_get_space(mupa);
1813
0
  equal = isl_space_tuple_is_equal(node_space, isl_dim_set,
1814
0
          mupa_space, isl_dim_set);
1815
0
  isl_space_free(mupa_space);
1816
0
  isl_space_free(node_space);
1817
0
  if (equal < 0)
1818
0
    return isl_stat_error;
1819
0
  if (!equal)
1820
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
1821
0
      "spaces don't match", return isl_stat_error);
1822
0
1823
0
  return isl_stat_ok;
1824
0
}
1825
1826
/* Shift the partial schedule of the band node "node" by "shift".
1827
 */
1828
__isl_give isl_schedule_node *isl_schedule_node_band_shift(
1829
  __isl_take isl_schedule_node *node,
1830
  __isl_take isl_multi_union_pw_aff *shift)
1831
0
{
1832
0
  isl_schedule_tree *tree;
1833
0
  int anchored;
1834
0
1835
0
  if (!node || !shift)
1836
0
    goto error;
1837
0
  if (check_space_multi_union_pw_aff(node, shift) < 0)
1838
0
    goto error;
1839
0
  anchored = isl_schedule_node_is_subtree_anchored(node);
1840
0
  if (anchored < 0)
1841
0
    goto error;
1842
0
  if (anchored)
1843
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
1844
0
      "cannot shift band node with anchored subtree",
1845
0
      goto error);
1846
0
1847
0
  tree = isl_schedule_node_get_tree(node);
1848
0
  tree = isl_schedule_tree_band_shift(tree, shift);
1849
0
  return isl_schedule_node_graft_tree(node, tree);
1850
0
error:
1851
0
  isl_multi_union_pw_aff_free(shift);
1852
0
  isl_schedule_node_free(node);
1853
0
  return NULL;
1854
0
}
1855
1856
/* Tile "node" with tile sizes "sizes".
1857
 *
1858
 * The current node is replaced by two nested nodes corresponding
1859
 * to the tile dimensions and the point dimensions.
1860
 *
1861
 * Return a pointer to the outer (tile) node.
1862
 *
1863
 * If any of the descendants of "node" depend on the set of outer band nodes,
1864
 * then we refuse to tile the node.
1865
 *
1866
 * If the scale tile loops option is set, then the tile loops
1867
 * are scaled by the tile sizes.  If the shift point loops option is set,
1868
 * then the point loops are shifted to start at zero.
1869
 * In particular, these options affect the tile and point loop schedules
1870
 * as follows
1871
 *
1872
 *  scale shift original  tile    point
1873
 *
1874
 *  0 0 i   floor(i/s)  i
1875
 *  1 0 i   s * floor(i/s)  i
1876
 *  0 1 i   floor(i/s)  i - s * floor(i/s)
1877
 *  1 1 i   s * floor(i/s)  i - s * floor(i/s)
1878
 */
1879
__isl_give isl_schedule_node *isl_schedule_node_band_tile(
1880
  __isl_take isl_schedule_node *node, __isl_take isl_multi_val *sizes)
1881
76
{
1882
76
  isl_schedule_tree *tree;
1883
76
  int anchored;
1884
76
1885
76
  if (!node || !sizes)
1886
0
    goto error;
1887
76
  anchored = isl_schedule_node_is_subtree_anchored(node);
1888
76
  if (anchored < 0)
1889
0
    goto error;
1890
76
  if (anchored)
1891
76
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
1892
76
      "cannot tile band node with anchored subtree",
1893
76
      goto error);
1894
76
1895
76
  if (check_space_multi_val(node, sizes) < 0)
1896
0
    goto error;
1897
76
1898
76
  tree = isl_schedule_node_get_tree(node);
1899
76
  tree = isl_schedule_tree_band_tile(tree, sizes);
1900
76
  return isl_schedule_node_graft_tree(node, tree);
1901
0
error:
1902
0
  isl_multi_val_free(sizes);
1903
0
  isl_schedule_node_free(node);
1904
0
  return NULL;
1905
76
}
1906
1907
/* Move the band node "node" down to all the leaves in the subtree
1908
 * rooted at "node".
1909
 * Return a pointer to the node in the resulting tree that is in the same
1910
 * position as the node pointed to by "node" in the original tree.
1911
 *
1912
 * If the node only has a leaf child, then nothing needs to be done.
1913
 * Otherwise, the child of the node is removed and the result is
1914
 * appended to all the leaves in the subtree rooted at the original child.
1915
 * Since the node is moved to the leaves, it needs to be expanded
1916
 * according to the expansion, if any, defined by that subtree.
1917
 * In the end, the original node is replaced by the result of
1918
 * attaching copies of the expanded node to the leaves.
1919
 *
1920
 * If any of the nodes in the subtree rooted at "node" depend on
1921
 * the set of outer band nodes then we refuse to sink the band node.
1922
 */
1923
__isl_give isl_schedule_node *isl_schedule_node_band_sink(
1924
  __isl_take isl_schedule_node *node)
1925
15
{
1926
15
  enum isl_schedule_node_type type;
1927
15
  isl_schedule_tree *tree, *child;
1928
15
  isl_union_pw_multi_aff *contraction;
1929
15
  int anchored;
1930
15
1931
15
  if (!node)
1932
0
    return NULL;
1933
15
1934
15
  type = isl_schedule_node_get_type(node);
1935
15
  if (type != isl_schedule_node_band)
1936
15
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
1937
15
      "not a band node", return isl_schedule_node_free(node));
1938
15
  anchored = isl_schedule_node_is_subtree_anchored(node);
1939
15
  if (anchored < 0)
1940
0
    return isl_schedule_node_free(node);
1941
15
  if (anchored)
1942
15
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
1943
15
      "cannot sink band node in anchored subtree",
1944
15
      return isl_schedule_node_free(node));
1945
15
  if (isl_schedule_tree_n_children(node->tree) == 0)
1946
8
    return node;
1947
7
1948
7
  contraction = isl_schedule_node_get_subtree_contraction(node);
1949
7
1950
7
  tree = isl_schedule_node_get_tree(node);
1951
7
  child = isl_schedule_tree_get_child(tree, 0);
1952
7
  tree = isl_schedule_tree_reset_children(tree);
1953
7
  tree = isl_schedule_tree_pullback_union_pw_multi_aff(tree, contraction);
1954
7
  tree = isl_schedule_tree_append_to_leaves(child, tree);
1955
7
1956
7
  return isl_schedule_node_graft_tree(node, tree);
1957
7
}
1958
1959
/* Split "node" into two nested band nodes, one with the first "pos"
1960
 * dimensions and one with the remaining dimensions.
1961
 * The schedules of the two band nodes live in anonymous spaces.
1962
 * The loop AST generation type options and the isolate option
1963
 * are split over the the two band nodes.
1964
 */
1965
__isl_give isl_schedule_node *isl_schedule_node_band_split(
1966
  __isl_take isl_schedule_node *node, int pos)
1967
33
{
1968
33
  int depth;
1969
33
  isl_schedule_tree *tree;
1970
33
1971
33
  depth = isl_schedule_node_get_schedule_depth(node);
1972
33
  tree = isl_schedule_node_get_tree(node);
1973
33
  tree = isl_schedule_tree_band_split(tree, pos, depth);
1974
33
  return isl_schedule_node_graft_tree(node, tree);
1975
33
}
1976
1977
/* Return the context of the context node "node".
1978
 */
1979
__isl_give isl_set *isl_schedule_node_context_get_context(
1980
  __isl_keep isl_schedule_node *node)
1981
0
{
1982
0
  if (!node)
1983
0
    return NULL;
1984
0
1985
0
  return isl_schedule_tree_context_get_context(node->tree);
1986
0
}
1987
1988
/* Return the domain of the domain node "node".
1989
 */
1990
__isl_give isl_union_set *isl_schedule_node_domain_get_domain(
1991
  __isl_keep isl_schedule_node *node)
1992
445
{
1993
445
  if (!node)
1994
0
    return NULL;
1995
445
1996
445
  return isl_schedule_tree_domain_get_domain(node->tree);
1997
445
}
1998
1999
/* Return the expansion map of expansion node "node".
2000
 */
2001
__isl_give isl_union_map *isl_schedule_node_expansion_get_expansion(
2002
  __isl_keep isl_schedule_node *node)
2003
2
{
2004
2
  if (!node)
2005
0
    return NULL;
2006
2
2007
2
  return isl_schedule_tree_expansion_get_expansion(node->tree);
2008
2
}
2009
2010
/* Return the contraction of expansion node "node".
2011
 */
2012
__isl_give isl_union_pw_multi_aff *isl_schedule_node_expansion_get_contraction(
2013
  __isl_keep isl_schedule_node *node)
2014
0
{
2015
0
  if (!node)
2016
0
    return NULL;
2017
0
2018
0
  return isl_schedule_tree_expansion_get_contraction(node->tree);
2019
0
}
2020
2021
/* Replace the contraction and the expansion of the expansion node "node"
2022
 * by "contraction" and "expansion".
2023
 */
2024
__isl_give isl_schedule_node *
2025
isl_schedule_node_expansion_set_contraction_and_expansion(
2026
  __isl_take isl_schedule_node *node,
2027
  __isl_take isl_union_pw_multi_aff *contraction,
2028
  __isl_take isl_union_map *expansion)
2029
0
{
2030
0
  isl_schedule_tree *tree;
2031
0
2032
0
  if (!node || !contraction || !expansion)
2033
0
    goto error;
2034
0
2035
0
  tree = isl_schedule_tree_copy(node->tree);
2036
0
  tree = isl_schedule_tree_expansion_set_contraction_and_expansion(tree,
2037
0
              contraction, expansion);
2038
0
  return isl_schedule_node_graft_tree(node, tree);
2039
0
error:
2040
0
  isl_schedule_node_free(node);
2041
0
  isl_union_pw_multi_aff_free(contraction);
2042
0
  isl_union_map_free(expansion);
2043
0
  return NULL;
2044
0
}
2045
2046
/* Return the extension of the extension node "node".
2047
 */
2048
__isl_give isl_union_map *isl_schedule_node_extension_get_extension(
2049
  __isl_keep isl_schedule_node *node)
2050
46
{
2051
46
  if (!node)
2052
0
    return NULL;
2053
46
2054
46
  return isl_schedule_tree_extension_get_extension(node->tree);
2055
46
}
2056
2057
/* Replace the extension of extension node "node" by "extension".
2058
 */
2059
__isl_give isl_schedule_node *isl_schedule_node_extension_set_extension(
2060
  __isl_take isl_schedule_node *node, __isl_take isl_union_map *extension)
2061
0
{
2062
0
  isl_schedule_tree *tree;
2063
0
2064
0
  if (!node || !extension)
2065
0
    goto error;
2066
0
2067
0
  tree = isl_schedule_tree_copy(node->tree);
2068
0
  tree = isl_schedule_tree_extension_set_extension(tree, extension);
2069
0
  return isl_schedule_node_graft_tree(node, tree);
2070
0
error:
2071
0
  isl_schedule_node_free(node);
2072
0
  isl_union_map_free(extension);
2073
0
  return NULL;
2074
0
}
2075
2076
/* Return the filter of the filter node "node".
2077
 */
2078
__isl_give isl_union_set *isl_schedule_node_filter_get_filter(
2079
  __isl_keep isl_schedule_node *node)
2080
2.28k
{
2081
2.28k
  if (!node)
2082
0
    return NULL;
2083
2.28k
2084
2.28k
  return isl_schedule_tree_filter_get_filter(node->tree);
2085
2.28k
}
2086
2087
/* Replace the filter of filter node "node" by "filter".
2088
 */
2089
__isl_give isl_schedule_node *isl_schedule_node_filter_set_filter(
2090
  __isl_take isl_schedule_node *node, __isl_take isl_union_set *filter)
2091
859
{
2092
859
  isl_schedule_tree *tree;
2093
859
2094
859
  if (!node || !filter)
2095
0
    goto error;
2096
859
2097
859
  tree = isl_schedule_tree_copy(node->tree);
2098
859
  tree = isl_schedule_tree_filter_set_filter(tree, filter);
2099
859
  return isl_schedule_node_graft_tree(node, tree);
2100
0
error:
2101
0
  isl_schedule_node_free(node);
2102
0
  isl_union_set_free(filter);
2103
0
  return NULL;
2104
859
}
2105
2106
/* Intersect the filter of filter node "node" with "filter".
2107
 *
2108
 * If the filter of the node is already a subset of "filter",
2109
 * then leave the node unchanged.
2110
 */
2111
__isl_give isl_schedule_node *isl_schedule_node_filter_intersect_filter(
2112
  __isl_take isl_schedule_node *node, __isl_take isl_union_set *filter)
2113
0
{
2114
0
  isl_union_set *node_filter = NULL;
2115
0
  isl_bool subset;
2116
0
2117
0
  if (!node || !filter)
2118
0
    goto error;
2119
0
2120
0
  node_filter = isl_schedule_node_filter_get_filter(node);
2121
0
  subset = isl_union_set_is_subset(node_filter, filter);
2122
0
  if (subset < 0)
2123
0
    goto error;
2124
0
  if (subset) {
2125
0
    isl_union_set_free(node_filter);
2126
0
    isl_union_set_free(filter);
2127
0
    return node;
2128
0
  }
2129
0
  node_filter = isl_union_set_intersect(node_filter, filter);
2130
0
  node = isl_schedule_node_filter_set_filter(node, node_filter);
2131
0
  return node;
2132
0
error:
2133
0
  isl_schedule_node_free(node);
2134
0
  isl_union_set_free(node_filter);
2135
0
  isl_union_set_free(filter);
2136
0
  return NULL;
2137
0
}
2138
2139
/* Return the guard of the guard node "node".
2140
 */
2141
__isl_give isl_set *isl_schedule_node_guard_get_guard(
2142
  __isl_keep isl_schedule_node *node)
2143
0
{
2144
0
  if (!node)
2145
0
    return NULL;
2146
0
2147
0
  return isl_schedule_tree_guard_get_guard(node->tree);
2148
0
}
2149
2150
/* Return the mark identifier of the mark node "node".
2151
 */
2152
__isl_give isl_id *isl_schedule_node_mark_get_id(
2153
  __isl_keep isl_schedule_node *node)
2154
154
{
2155
154
  if (!node)
2156
0
    return NULL;
2157
154
2158
154
  return isl_schedule_tree_mark_get_id(node->tree);
2159
154
}
2160
2161
/* Replace the child at position "pos" of the sequence node "node"
2162
 * by the children of sequence root node of "tree".
2163
 */
2164
__isl_give isl_schedule_node *isl_schedule_node_sequence_splice(
2165
  __isl_take isl_schedule_node *node, int pos,
2166
  __isl_take isl_schedule_tree *tree)
2167
0
{
2168
0
  isl_schedule_tree *node_tree;
2169
0
2170
0
  if (!node || !tree)
2171
0
    goto error;
2172
0
  if (isl_schedule_node_get_type(node) != isl_schedule_node_sequence)
2173
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
2174
0
      "not a sequence node", goto error);
2175
0
  if (isl_schedule_tree_get_type(tree) != isl_schedule_node_sequence)
2176
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
2177
0
      "not a sequence node", goto error);
2178
0
  node_tree = isl_schedule_node_get_tree(node);
2179
0
  node_tree = isl_schedule_tree_sequence_splice(node_tree, pos, tree);
2180
0
  node = isl_schedule_node_graft_tree(node, node_tree);
2181
0
2182
0
  return node;
2183
0
error:
2184
0
  isl_schedule_node_free(node);
2185
0
  isl_schedule_tree_free(tree);
2186
0
  return NULL;
2187
0
}
2188
2189
/* Given a sequence node "node", with a child at position "pos" that
2190
 * is also a sequence node, attach the children of that node directly
2191
 * as children of "node" at that position, replacing the original child.
2192
 *
2193
 * The filters of these children are intersected with the filter
2194
 * of the child at position "pos".
2195
 */
2196
__isl_give isl_schedule_node *isl_schedule_node_sequence_splice_child(
2197
  __isl_take isl_schedule_node *node, int pos)
2198
0
{
2199
0
  int i, n;
2200
0
  isl_union_set *filter;
2201
0
  isl_schedule_node *child;
2202
0
  isl_schedule_tree *tree;
2203
0
2204
0
  if (!node)
2205
0
    return NULL;
2206
0
  if (isl_schedule_node_get_type(node) != isl_schedule_node_sequence)
2207
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
2208
0
      "not a sequence node",
2209
0
      return isl_schedule_node_free(node));
2210
0
  node = isl_schedule_node_child(node, pos);
2211
0
  node = isl_schedule_node_child(node, 0);
2212
0
  if (isl_schedule_node_get_type(node) != isl_schedule_node_sequence)
2213
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
2214
0
      "not a sequence node",
2215
0
      return isl_schedule_node_free(node));
2216
0
  child = isl_schedule_node_copy(node);
2217
0
  node = isl_schedule_node_parent(node);
2218
0
  filter = isl_schedule_node_filter_get_filter(node);
2219
0
  n = isl_schedule_node_n_children(child);
2220
0
  for (i = 0; i < n; ++i) {
2221
0
    child = isl_schedule_node_child(child, i);
2222
0
    child = isl_schedule_node_filter_intersect_filter(child,
2223
0
            isl_union_set_copy(filter));
2224
0
    child = isl_schedule_node_parent(child);
2225
0
  }
2226
0
  isl_union_set_free(filter);
2227
0
  tree = isl_schedule_node_get_tree(child);
2228
0
  isl_schedule_node_free(child);
2229
0
  node = isl_schedule_node_parent(node);
2230
0
  node = isl_schedule_node_sequence_splice(node, pos, tree);
2231
0
2232
0
  return node;
2233
0
}
2234
2235
/* Update the ancestors of "node" to point to the tree that "node"
2236
 * now points to.
2237
 * That is, replace the child in the original parent that corresponds
2238
 * to the current tree position by node->tree and continue updating
2239
 * the ancestors in the same way until the root is reached.
2240
 *
2241
 * If "fn" is not NULL, then it is called on each ancestor as we move up
2242
 * the tree so that it can modify the ancestor before it is added
2243
 * to the list of ancestors of the modified node.
2244
 * The additional "pos" argument records the position
2245
 * of the "tree" argument in the original schedule tree.
2246
 *
2247
 * If "node" originally points to a leaf of the schedule tree, then make sure
2248
 * that in the end it points to a leaf in the updated schedule tree.
2249
 */
2250
static __isl_give isl_schedule_node *update_ancestors(
2251
  __isl_take isl_schedule_node *node,
2252
  __isl_give isl_schedule_tree *(*fn)(__isl_take isl_schedule_tree *tree,
2253
    __isl_keep isl_schedule_node *pos, void *user), void *user)
2254
6.46k
{
2255
6.46k
  int i, n;
2256
6.46k
  int is_leaf;
2257
6.46k
  isl_schedule_tree *tree;
2258
6.46k
  isl_schedule_node *pos = NULL;
2259
6.46k
2260
6.46k
  if (fn)
2261
3
    pos = isl_schedule_node_copy(node);
2262
6.46k
2263
6.46k
  node = isl_schedule_node_cow(node);
2264
6.46k
  if (!node)
2265
0
    return isl_schedule_node_free(pos);
2266
6.46k
2267
6.46k
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
2268
6.46k
  tree = isl_schedule_tree_copy(node->tree);
2269
6.46k
2270
17.3k
  for (i = n - 1; i >= 0; 
--i10.9k
) {
2271
10.9k
    isl_schedule_tree *parent;
2272
10.9k
2273
10.9k
    parent = isl_schedule_tree_list_get_schedule_tree(
2274
10.9k
                node->ancestors, i);
2275
10.9k
    parent = isl_schedule_tree_replace_child(parent,
2276
10.9k
                node->child_pos[i], tree);
2277
10.9k
    if (fn) {
2278
9
      pos = isl_schedule_node_parent(pos);
2279
9
      parent = fn(parent, pos, user);
2280
9
    }
2281
10.9k
    node->ancestors = isl_schedule_tree_list_set_schedule_tree(
2282
10.9k
          node->ancestors, i, isl_schedule_tree_copy(parent));
2283
10.9k
2284
10.9k
    tree = parent;
2285
10.9k
  }
2286
6.46k
2287
6.46k
  if (fn)
2288
3
    isl_schedule_node_free(pos);
2289
6.46k
2290
6.46k
  is_leaf = isl_schedule_tree_is_leaf(node->tree);
2291
6.46k
  node->schedule = isl_schedule_set_root(node->schedule, tree);
2292
6.46k
  if (is_leaf) {
2293
163
    isl_schedule_tree_free(node->tree);
2294
163
    node->tree = isl_schedule_node_get_leaf(node);
2295
163
  }
2296
6.46k
2297
6.46k
  if (!node->schedule || !node->ancestors)
2298
0
    return isl_schedule_node_free(node);
2299
6.46k
2300
6.46k
  return node;
2301
6.46k
}
2302
2303
/* Replace the subtree that "pos" points to by "tree", updating
2304
 * the ancestors to maintain a consistent state.
2305
 */
2306
__isl_give isl_schedule_node *isl_schedule_node_graft_tree(
2307
  __isl_take isl_schedule_node *pos, __isl_take isl_schedule_tree *tree)
2308
7.14k
{
2309
7.14k
  if (!tree || !pos)
2310
0
    goto error;
2311
7.14k
  if (pos->tree == tree) {
2312
688
    isl_schedule_tree_free(tree);
2313
688
    return pos;
2314
688
  }
2315
6.45k
2316
6.45k
  pos = isl_schedule_node_cow(pos);
2317
6.45k
  if (!pos)
2318
0
    goto error;
2319
6.45k
2320
6.45k
  isl_schedule_tree_free(pos->tree);
2321
6.45k
  pos->tree = tree;
2322
6.45k
2323
6.45k
  return update_ancestors(pos, NULL, NULL);
2324
0
error:
2325
0
  isl_schedule_node_free(pos);
2326
0
  isl_schedule_tree_free(tree);
2327
0
  return NULL;
2328
7.14k
}
2329
2330
/* Make sure we can insert a node between "node" and its parent.
2331
 * Return -1 on error, reporting the reason why we cannot insert a node.
2332
 */
2333
static int check_insert(__isl_keep isl_schedule_node *node)
2334
2.25k
{
2335
2.25k
  int has_parent;
2336
2.25k
  enum isl_schedule_node_type type;
2337
2.25k
2338
2.25k
  has_parent = isl_schedule_node_has_parent(node);
2339
2.25k
  if (has_parent < 0)
2340
0
    return -1;
2341
2.25k
  if (!has_parent)
2342
2.25k
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
2343
2.25k
      "cannot insert node outside of root", return -1);
2344
2.25k
2345
2.25k
  type = isl_schedule_node_get_parent_type(node);
2346
2.25k
  if (type == isl_schedule_node_error)
2347
0
    return -1;
2348
2.25k
  if (type == isl_schedule_node_set || type == isl_schedule_node_sequence)
2349
2.25k
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
2350
2.25k
      "cannot insert node between set or sequence node "
2351
2.25k
      "and its filter children", return -1);
2352
2.25k
2353
2.25k
  return 0;
2354
2.25k
}
2355
2356
/* Insert a band node with partial schedule "mupa" between "node" and
2357
 * its parent.
2358
 * Return a pointer to the new band node.
2359
 *
2360
 * If any of the nodes in the subtree rooted at "node" depend on
2361
 * the set of outer band nodes then we refuse to insert the band node.
2362
 */
2363
__isl_give isl_schedule_node *isl_schedule_node_insert_partial_schedule(
2364
  __isl_take isl_schedule_node *node,
2365
  __isl_take isl_multi_union_pw_aff *mupa)
2366
1.97k
{
2367
1.97k
  int anchored;
2368
1.97k
  isl_schedule_band *band;
2369
1.97k
  isl_schedule_tree *tree;
2370
1.97k
2371
1.97k
  if (check_insert(node) < 0)
2372
0
    node = isl_schedule_node_free(node);
2373
1.97k
  anchored = isl_schedule_node_is_subtree_anchored(node);
2374
1.97k
  if (anchored < 0)
2375
0
    goto error;
2376
1.97k
  if (anchored)
2377
1.97k
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
2378
1.97k
      "cannot insert band node in anchored subtree",
2379
1.97k
      goto error);
2380
1.97k
2381
1.97k
  tree = isl_schedule_node_get_tree(node);
2382
1.97k
  band = isl_schedule_band_from_multi_union_pw_aff(mupa);
2383
1.97k
  tree = isl_schedule_tree_insert_band(tree, band);
2384
1.97k
  node = isl_schedule_node_graft_tree(node, tree);
2385
1.97k
2386
1.97k
  return node;
2387
0
error:
2388
0
  isl_schedule_node_free(node);
2389
0
  isl_multi_union_pw_aff_free(mupa);
2390
0
  return NULL;
2391
1.97k
}
2392
2393
/* Insert a context node with context "context" between "node" and its parent.
2394
 * Return a pointer to the new context node.
2395
 */
2396
__isl_give isl_schedule_node *isl_schedule_node_insert_context(
2397
  __isl_take isl_schedule_node *node, __isl_take isl_set *context)
2398
0
{
2399
0
  isl_schedule_tree *tree;
2400
0
2401
0
  if (check_insert(node) < 0)
2402
0
    node = isl_schedule_node_free(node);
2403
0
2404
0
  tree = isl_schedule_node_get_tree(node);
2405
0
  tree = isl_schedule_tree_insert_context(tree, context);
2406
0
  node = isl_schedule_node_graft_tree(node, tree);
2407
0
2408
0
  return node;
2409
0
}
2410
2411
/* Insert an expansion node with the given "contraction" and "expansion"
2412
 * between "node" and its parent.
2413
 * Return a pointer to the new expansion node.
2414
 *
2415
 * Typically the domain and range spaces of the expansion are different.
2416
 * This means that only one of them can refer to the current domain space
2417
 * in a consistent tree.  It is up to the caller to ensure that the tree
2418
 * returns to a consistent state.
2419
 */
2420
__isl_give isl_schedule_node *isl_schedule_node_insert_expansion(
2421
  __isl_take isl_schedule_node *node,
2422
  __isl_take isl_union_pw_multi_aff *contraction,
2423
  __isl_take isl_union_map *expansion)
2424
3
{
2425
3
  isl_schedule_tree *tree;
2426
3
2427
3
  if (check_insert(node) < 0)
2428
0
    node = isl_schedule_node_free(node);
2429
3
2430
3
  tree = isl_schedule_node_get_tree(node);
2431
3
  tree = isl_schedule_tree_insert_expansion(tree, contraction, expansion);
2432
3
  node = isl_schedule_node_graft_tree(node, tree);
2433
3
2434
3
  return node;
2435
3
}
2436
2437
/* Insert an extension node with extension "extension" between "node" and
2438
 * its parent.
2439
 * Return a pointer to the new extension node.
2440
 */
2441
__isl_give isl_schedule_node *isl_schedule_node_insert_extension(
2442
  __isl_take isl_schedule_node *node,
2443
  __isl_take isl_union_map *extension)
2444
24
{
2445
24
  isl_schedule_tree *tree;
2446
24
2447
24
  tree = isl_schedule_node_get_tree(node);
2448
24
  tree = isl_schedule_tree_insert_extension(tree, extension);
2449
24
  node = isl_schedule_node_graft_tree(node, tree);
2450
24
2451
24
  return node;
2452
24
}
2453
2454
/* Insert a filter node with filter "filter" between "node" and its parent.
2455
 * Return a pointer to the new filter node.
2456
 */
2457
__isl_give isl_schedule_node *isl_schedule_node_insert_filter(
2458
  __isl_take isl_schedule_node *node, __isl_take isl_union_set *filter)
2459
24
{
2460
24
  isl_schedule_tree *tree;
2461
24
2462
24
  if (check_insert(node) < 0)
2463
0
    node = isl_schedule_node_free(node);
2464
24
2465
24
  tree = isl_schedule_node_get_tree(node);
2466
24
  tree = isl_schedule_tree_insert_filter(tree, filter);
2467
24
  node = isl_schedule_node_graft_tree(node, tree);
2468
24
2469
24
  return node;
2470
24
}
2471
2472
/* Insert a guard node with guard "guard" between "node" and its parent.
2473
 * Return a pointer to the new guard node.
2474
 */
2475
__isl_give isl_schedule_node *isl_schedule_node_insert_guard(
2476
  __isl_take isl_schedule_node *node, __isl_take isl_set *guard)
2477
0
{
2478
0
  isl_schedule_tree *tree;
2479
0
2480
0
  if (check_insert(node) < 0)
2481
0
    node = isl_schedule_node_free(node);
2482
0
2483
0
  tree = isl_schedule_node_get_tree(node);
2484
0
  tree = isl_schedule_tree_insert_guard(tree, guard);
2485
0
  node = isl_schedule_node_graft_tree(node, tree);
2486
0
2487
0
  return node;
2488
0
}
2489
2490
/* Insert a mark node with mark identifier "mark" between "node" and
2491
 * its parent.
2492
 * Return a pointer to the new mark node.
2493
 */
2494
__isl_give isl_schedule_node *isl_schedule_node_insert_mark(
2495
  __isl_take isl_schedule_node *node, __isl_take isl_id *mark)
2496
155
{
2497
155
  isl_schedule_tree *tree;
2498
155
2499
155
  if (check_insert(node) < 0)
2500
0
    node = isl_schedule_node_free(node);
2501
155
2502
155
  tree = isl_schedule_node_get_tree(node);
2503
155
  tree = isl_schedule_tree_insert_mark(tree, mark);
2504
155
  node = isl_schedule_node_graft_tree(node, tree);
2505
155
2506
155
  return node;
2507
155
}
2508
2509
/* Attach the current subtree of "node" to a sequence of filter tree nodes
2510
 * with filters described by "filters", attach this sequence
2511
 * of filter tree nodes as children to a new tree of type "type" and
2512
 * replace the original subtree of "node" by this new tree.
2513
 * Each copy of the original subtree is simplified with respect
2514
 * to the corresponding filter.
2515
 */
2516
static __isl_give isl_schedule_node *isl_schedule_node_insert_children(
2517
  __isl_take isl_schedule_node *node,
2518
  enum isl_schedule_node_type type,
2519
  __isl_take isl_union_set_list *filters)
2520
76
{
2521
76
  int i, n;
2522
76
  isl_ctx *ctx;
2523
76
  isl_schedule_tree *tree;
2524
76
  isl_schedule_tree_list *list;
2525
76
2526
76
  if (check_insert(node) < 0)
2527
0
    node = isl_schedule_node_free(node);
2528
76
2529
76
  if (!node || !filters)
2530
0
    goto error;
2531
76
2532
76
  ctx = isl_schedule_node_get_ctx(node);
2533
76
  n = isl_union_set_list_n_union_set(filters);
2534
76
  list = isl_schedule_tree_list_alloc(ctx, n);
2535
248
  for (i = 0; i < n; 
++i172
) {
2536
172
    isl_schedule_node *node_i;
2537
172
    isl_schedule_tree *tree;
2538
172
    isl_union_set *filter;
2539
172
2540
172
    filter = isl_union_set_list_get_union_set(filters, i);
2541
172
    node_i = isl_schedule_node_copy(node);
2542
172
    node_i = isl_schedule_node_gist(node_i,
2543
172
            isl_union_set_copy(filter));
2544
172
    tree = isl_schedule_node_get_tree(node_i);
2545
172
    isl_schedule_node_free(node_i);
2546
172
    tree = isl_schedule_tree_insert_filter(tree, filter);
2547
172
    list = isl_schedule_tree_list_add(list, tree);
2548
172
  }
2549
76
  tree = isl_schedule_tree_from_children(type, list);
2550
76
  node = isl_schedule_node_graft_tree(node, tree);
2551
76
2552
76
  isl_union_set_list_free(filters);
2553
76
  return node;
2554
0
error:
2555
0
  isl_union_set_list_free(filters);
2556
0
  isl_schedule_node_free(node);
2557
0
  return NULL;
2558
76
}
2559
2560
/* Insert a sequence node with child filters "filters" between "node" and
2561
 * its parent.  That is, the tree that "node" points to is attached
2562
 * to each of the child nodes of the filter nodes.
2563
 * Return a pointer to the new sequence node.
2564
 */
2565
__isl_give isl_schedule_node *isl_schedule_node_insert_sequence(
2566
  __isl_take isl_schedule_node *node,
2567
  __isl_take isl_union_set_list *filters)
2568
67
{
2569
67
  return isl_schedule_node_insert_children(node,
2570
67
          isl_schedule_node_sequence, filters);
2571
67
}
2572
2573
/* Insert a set node with child filters "filters" between "node" and
2574
 * its parent.  That is, the tree that "node" points to is attached
2575
 * to each of the child nodes of the filter nodes.
2576
 * Return a pointer to the new set node.
2577
 */
2578
__isl_give isl_schedule_node *isl_schedule_node_insert_set(
2579
  __isl_take isl_schedule_node *node,
2580
  __isl_take isl_union_set_list *filters)
2581
9
{
2582
9
  return isl_schedule_node_insert_children(node,
2583
9
          isl_schedule_node_set, filters);
2584
9
}
2585
2586
/* Remove "node" from its schedule tree and return a pointer
2587
 * to the leaf at the same position in the updated schedule tree.
2588
 *
2589
 * It is not allowed to remove the root of a schedule tree or
2590
 * a child of a set or sequence node.
2591
 */
2592
__isl_give isl_schedule_node *isl_schedule_node_cut(
2593
  __isl_take isl_schedule_node *node)
2594
336
{
2595
336
  isl_schedule_tree *leaf;
2596
336
  enum isl_schedule_node_type parent_type;
2597
336
2598
336
  if (!node)
2599
0
    return NULL;
2600
336
  if (!isl_schedule_node_has_parent(node))
2601
336
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
2602
336
      "cannot cut root", return isl_schedule_node_free(node));
2603
336
2604
336
  parent_type = isl_schedule_node_get_parent_type(node);
2605
336
  if (parent_type == isl_schedule_node_set ||
2606
336
      parent_type == isl_schedule_node_sequence)
2607
336
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
2608
336
      "cannot cut child of set or sequence",
2609
336
      return isl_schedule_node_free(node));
2610
336
2611
336
  leaf = isl_schedule_node_get_leaf(node);
2612
336
  return isl_schedule_node_graft_tree(node, leaf);
2613
336
}
2614
2615
/* Remove a single node from the schedule tree, attaching the child
2616
 * of "node" directly to its parent.
2617
 * Return a pointer to this former child or to the leaf the position
2618
 * of the original node if there was no child.
2619
 * It is not allowed to remove the root of a schedule tree,
2620
 * a set or sequence node, a child of a set or sequence node or
2621
 * a band node with an anchored subtree.
2622
 */
2623
__isl_give isl_schedule_node *isl_schedule_node_delete(
2624
  __isl_take isl_schedule_node *node)
2625
360
{
2626
360
  int n;
2627
360
  isl_schedule_tree *tree;
2628
360
  enum isl_schedule_node_type type;
2629
360
2630
360
  if (!node)
2631
0
    return NULL;
2632
360
2633
360
  if (isl_schedule_node_get_tree_depth(node) == 0)
2634
360
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
2635
360
      "cannot delete root node",
2636
360
      return isl_schedule_node_free(node));
2637
360
  n = isl_schedule_node_n_children(node);
2638
360
  if (n != 1)
2639
360
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
2640
360
      "can only delete node with a single child",
2641
360
      return isl_schedule_node_free(node));
2642
360
  type = isl_schedule_node_get_parent_type(node);
2643
360
  if (type == isl_schedule_node_sequence || type == isl_schedule_node_set)
2644
360
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
2645
360
      "cannot delete child of set or sequence",
2646
360
      return isl_schedule_node_free(node));
2647
360
  if (isl_schedule_node_get_type(node) == isl_schedule_node_band) {
2648
94
    int anchored;
2649
94
2650
94
    anchored = isl_schedule_node_is_subtree_anchored(node);
2651
94
    if (anchored < 0)
2652
0
      return isl_schedule_node_free(node);
2653
94
    if (anchored)
2654
94
      
isl_die0
(isl_schedule_node_get_ctx(node),
2655
94
        isl_error_invalid,
2656
94
        "cannot delete band node with anchored subtree",
2657
94
        return isl_schedule_node_free(node));
2658
94
  }
2659
360
2660
360
  tree = isl_schedule_node_get_tree(node);
2661
360
  if (!tree || isl_schedule_tree_has_children(tree)) {
2662
237
    tree = isl_schedule_tree_child(tree, 0);
2663
237
  } else {
2664
123
    isl_schedule_tree_free(tree);
2665
123
    tree = isl_schedule_node_get_leaf(node);
2666
123
  }
2667
360
  node = isl_schedule_node_graft_tree(node, tree);
2668
360
2669
360
  return node;
2670
360
}
2671
2672
/* Internal data structure for the group_ancestor callback.
2673
 *
2674
 * If "finished" is set, then we no longer need to modify
2675
 * any further ancestors.
2676
 *
2677
 * "contraction" and "expansion" represent the expansion
2678
 * that reflects the grouping.
2679
 *
2680
 * "domain" contains the domain elements that reach the position
2681
 * where the grouping is performed.  That is, it is the range
2682
 * of the resulting expansion.
2683
 * "domain_universe" is the universe of "domain".
2684
 * "group" is the set of group elements, i.e., the domain
2685
 * of the resulting expansion.
2686
 * "group_universe" is the universe of "group".
2687
 *
2688
 * "sched" is the schedule for the group elements, in pratice
2689
 * an identity mapping on "group_universe".
2690
 * "dim" is the dimension of "sched".
2691
 */
2692
struct isl_schedule_group_data {
2693
  int finished;
2694
2695
  isl_union_map *expansion;
2696
  isl_union_pw_multi_aff *contraction;
2697
2698
  isl_union_set *domain;
2699
  isl_union_set *domain_universe;
2700
  isl_union_set *group;
2701
  isl_union_set *group_universe;
2702
2703
  int dim;
2704
  isl_multi_aff *sched;
2705
};
2706
2707
/* Is domain covered by data->domain within data->domain_universe?
2708
 */
2709
static int locally_covered_by_domain(__isl_keep isl_union_set *domain,
2710
  struct isl_schedule_group_data *data)
2711
3
{
2712
3
  int is_subset;
2713
3
  isl_union_set *test;
2714
3
2715
3
  test = isl_union_set_copy(domain);
2716
3
  test = isl_union_set_intersect(test,
2717
3
          isl_union_set_copy(data->domain_universe));
2718
3
  is_subset = isl_union_set_is_subset(test, data->domain);
2719
3
  isl_union_set_free(test);
2720
3
2721
3
  return is_subset;
2722
3
}
2723
2724
/* Update the band tree root "tree" to refer to the group instances
2725
 * in data->group rather than the original domain elements in data->domain.
2726
 * "pos" is the position in the original schedule tree where the modified
2727
 * "tree" will be attached.
2728
 *
2729
 * Add the part of the identity schedule on the group instances data->sched
2730
 * that corresponds to this band node to the band schedule.
2731
 * If the domain elements that reach the node and that are part
2732
 * of data->domain_universe are all elements of data->domain (and therefore
2733
 * replaced by the group instances) then this data->domain_universe
2734
 * is removed from the domain of the band schedule.
2735
 */
2736
static __isl_give isl_schedule_tree *group_band(
2737
  __isl_take isl_schedule_tree *tree, __isl_keep isl_schedule_node *pos,
2738
  struct isl_schedule_group_data *data)
2739
2
{
2740
2
  isl_union_set *domain;
2741
2
  isl_multi_aff *ma;
2742
2
  isl_multi_union_pw_aff *mupa, *partial;
2743
2
  int is_covered;
2744
2
  int depth, n, has_id;
2745
2
2746
2
  domain = isl_schedule_node_get_domain(pos);
2747
2
  is_covered = locally_covered_by_domain(domain, data);
2748
2
  if (is_covered >= 0 && is_covered) {
2749
2
    domain = isl_union_set_universe(domain);
2750
2
    domain = isl_union_set_subtract(domain,
2751
2
          isl_union_set_copy(data->domain_universe));
2752
2
    tree = isl_schedule_tree_band_intersect_domain(tree, domain);
2753
2
  } else
2754
0
    isl_union_set_free(domain);
2755
2
  if (is_covered < 0)
2756
0
    return isl_schedule_tree_free(tree);
2757
2
  depth = isl_schedule_node_get_schedule_depth(pos);
2758
2
  n = isl_schedule_tree_band_n_member(tree);
2759
2
  ma = isl_multi_aff_copy(data->sched);
2760
2
  ma = isl_multi_aff_drop_dims(ma, isl_dim_out, 0, depth);
2761
2
  ma = isl_multi_aff_drop_dims(ma, isl_dim_out, n, data->dim - depth - n);
2762
2
  mupa = isl_multi_union_pw_aff_from_multi_aff(ma);
2763
2
  partial = isl_schedule_tree_band_get_partial_schedule(tree);
2764
2
  has_id = isl_multi_union_pw_aff_has_tuple_id(partial, isl_dim_set);
2765
2
  if (has_id < 0) {
2766
0
    partial = isl_multi_union_pw_aff_free(partial);
2767
2
  } else if (has_id) {
2768
0
    isl_id *id;
2769
0
    id = isl_multi_union_pw_aff_get_tuple_id(partial, isl_dim_set);
2770
0
    mupa = isl_multi_union_pw_aff_set_tuple_id(mupa,
2771
0
                  isl_dim_set, id);
2772
0
  }
2773
2
  partial = isl_multi_union_pw_aff_union_add(partial, mupa);
2774
2
  tree = isl_schedule_tree_band_set_partial_schedule(tree, partial);
2775
2
2776
2
  return tree;
2777
2
}
2778
2779
/* Drop the parameters in "uset" that are not also in "space".
2780
 * "n" is the number of parameters in "space".
2781
 */
2782
static __isl_give isl_union_set *union_set_drop_extra_params(
2783
  __isl_take isl_union_set *uset, __isl_keep isl_space *space, int n)
2784
0
{
2785
0
  int n2;
2786
0
2787
0
  uset = isl_union_set_align_params(uset, isl_space_copy(space));
2788
0
  n2 = isl_union_set_dim(uset, isl_dim_param);
2789
0
  uset = isl_union_set_project_out(uset, isl_dim_param, n, n2 - n);
2790
0
2791
0
  return uset;
2792
0
}
2793
2794
/* Update the context tree root "tree" to refer to the group instances
2795
 * in data->group rather than the original domain elements in data->domain.
2796
 * "pos" is the position in the original schedule tree where the modified
2797
 * "tree" will be attached.
2798
 *
2799
 * We do not actually need to update "tree" since a context node only
2800
 * refers to the schedule space.  However, we may need to update "data"
2801
 * to not refer to any parameters introduced by the context node.
2802
 */
2803
static __isl_give isl_schedule_tree *group_context(
2804
  __isl_take isl_schedule_tree *tree, __isl_keep isl_schedule_node *pos,
2805
  struct isl_schedule_group_data *data)
2806
0
{
2807
0
  isl_space *space;
2808
0
  isl_union_set *domain;
2809
0
  int n1, n2;
2810
0
  int involves;
2811
0
2812
0
  if (isl_schedule_node_get_tree_depth(pos) == 1)
2813
0
    return tree;
2814
0
2815
0
  domain = isl_schedule_node_get_universe_domain(pos);
2816
0
  space = isl_union_set_get_space(domain);
2817
0
  isl_union_set_free(domain);
2818
0
2819
0
  n1 = isl_space_dim(space, isl_dim_param);
2820
0
  data->expansion = isl_union_map_align_params(data->expansion, space);
2821
0
  n2 = isl_union_map_dim(data->expansion, isl_dim_param);
2822
0
2823
0
  if (!data->expansion)
2824
0
    return isl_schedule_tree_free(tree);
2825
0
  if (n1 == n2)
2826
0
    return tree;
2827
0
2828
0
  involves = isl_union_map_involves_dims(data->expansion,
2829
0
        isl_dim_param, n1, n2 - n1);
2830
0
  if (involves < 0)
2831
0
    return isl_schedule_tree_free(tree);
2832
0
  if (involves)
2833
0
    isl_die(isl_schedule_node_get_ctx(pos), isl_error_invalid,
2834
0
      "grouping cannot only refer to global parameters",
2835
0
      return isl_schedule_tree_free(tree));
2836
0
2837
0
  data->expansion = isl_union_map_project_out(data->expansion,
2838
0
        isl_dim_param, n1, n2 - n1);
2839
0
  space = isl_union_map_get_space(data->expansion);
2840
0
2841
0
  data->contraction = isl_union_pw_multi_aff_align_params(
2842
0
        data->contraction, isl_space_copy(space));
2843
0
  n2 = isl_union_pw_multi_aff_dim(data->contraction, isl_dim_param);
2844
0
  data->contraction = isl_union_pw_multi_aff_drop_dims(data->contraction,
2845
0
        isl_dim_param, n1, n2 - n1);
2846
0
2847
0
  data->domain = union_set_drop_extra_params(data->domain, space, n1);
2848
0
  data->domain_universe =
2849
0
    union_set_drop_extra_params(data->domain_universe, space, n1);
2850
0
  data->group = union_set_drop_extra_params(data->group, space, n1);
2851
0
  data->group_universe =
2852
0
    union_set_drop_extra_params(data->group_universe, space, n1);
2853
0
2854
0
  data->sched = isl_multi_aff_align_params(data->sched,
2855
0
        isl_space_copy(space));
2856
0
  n2 = isl_multi_aff_dim(data->sched, isl_dim_param);
2857
0
  data->sched = isl_multi_aff_drop_dims(data->sched,
2858
0
        isl_dim_param, n1, n2 - n1);
2859
0
2860
0
  isl_space_free(space);
2861
0
2862
0
  return tree;
2863
0
}
2864
2865
/* Update the domain tree root "tree" to refer to the group instances
2866
 * in data->group rather than the original domain elements in data->domain.
2867
 * "pos" is the position in the original schedule tree where the modified
2868
 * "tree" will be attached.
2869
 *
2870
 * We first double-check that all grouped domain elements are actually
2871
 * part of the root domain and then replace those elements by the group
2872
 * instances.
2873
 */
2874
static __isl_give isl_schedule_tree *group_domain(
2875
  __isl_take isl_schedule_tree *tree, __isl_keep isl_schedule_node *pos,
2876
  struct isl_schedule_group_data *data)
2877
2
{
2878
2
  isl_union_set *domain;
2879
2
  int is_subset;
2880
2
2881
2
  domain = isl_schedule_tree_domain_get_domain(tree);
2882
2
  is_subset = isl_union_set_is_subset(data->domain, domain);
2883
2
  isl_union_set_free(domain);
2884
2
  if (is_subset < 0)
2885
0
    return isl_schedule_tree_free(tree);
2886
2
  if (!is_subset)
2887
2
    
isl_die0
(isl_schedule_tree_get_ctx(tree), isl_error_internal,
2888
2
      "grouped domain should be part of outer domain",
2889
2
      return isl_schedule_tree_free(tree));
2890
2
  domain = isl_schedule_tree_domain_get_domain(tree);
2891
2
  domain = isl_union_set_subtract(domain,
2892
2
        isl_union_set_copy(data->domain));
2893
2
  domain = isl_union_set_union(domain, isl_union_set_copy(data->group));
2894
2
  tree = isl_schedule_tree_domain_set_domain(tree, domain);
2895
2
2896
2
  return tree;
2897
2
}
2898
2899
/* Update the expansion tree root "tree" to refer to the group instances
2900
 * in data->group rather than the original domain elements in data->domain.
2901
 * "pos" is the position in the original schedule tree where the modified
2902
 * "tree" will be attached.
2903
 *
2904
 * Let G_1 -> D_1 be the expansion of "tree" and G_2 -> D_2 the newly
2905
 * introduced expansion in a descendant of "tree".
2906
 * We first double-check that D_2 is a subset of D_1.
2907
 * Then we remove D_2 from the range of G_1 -> D_1 and add the mapping
2908
 * G_1 -> D_1 . D_2 -> G_2.
2909
 * Simmilarly, we restrict the domain of the contraction to the universe
2910
 * of the range of the updated expansion and add G_2 -> D_2 . D_1 -> G_1,
2911
 * attempting to remove the domain constraints of this additional part.
2912
 */
2913
static __isl_give isl_schedule_tree *group_expansion(
2914
  __isl_take isl_schedule_tree *tree, __isl_keep isl_schedule_node *pos,
2915
  struct isl_schedule_group_data *data)
2916
1
{
2917
1
  isl_union_set *domain;
2918
1
  isl_union_map *expansion, *umap;
2919
1
  isl_union_pw_multi_aff *contraction, *upma;
2920
1
  int is_subset;
2921
1
2922
1
  expansion = isl_schedule_tree_expansion_get_expansion(tree);
2923
1
  domain = isl_union_map_range(expansion);
2924
1
  is_subset = isl_union_set_is_subset(data->domain, domain);
2925
1
  isl_union_set_free(domain);
2926
1
  if (is_subset < 0)
2927
0
    return isl_schedule_tree_free(tree);
2928
1
  if (!is_subset)
2929
1
    
isl_die0
(isl_schedule_tree_get_ctx(tree), isl_error_internal,
2930
1
      "grouped domain should be part "
2931
1
      "of outer expansion domain",
2932
1
      return isl_schedule_tree_free(tree));
2933
1
  expansion = isl_schedule_tree_expansion_get_expansion(tree);
2934
1
  umap = isl_union_map_from_union_pw_multi_aff(
2935
1
      isl_union_pw_multi_aff_copy(data->contraction));
2936
1
  umap = isl_union_map_apply_range(expansion, umap);
2937
1
  expansion = isl_schedule_tree_expansion_get_expansion(tree);
2938
1
  expansion = isl_union_map_subtract_range(expansion,
2939
1
        isl_union_set_copy(data->domain));
2940
1
  expansion = isl_union_map_union(expansion, umap);
2941
1
  umap = isl_union_map_universe(isl_union_map_copy(expansion));
2942
1
  domain = isl_union_map_range(umap);
2943
1
  contraction = isl_schedule_tree_expansion_get_contraction(tree);
2944
1
  umap = isl_union_map_from_union_pw_multi_aff(contraction);
2945
1
  umap = isl_union_map_apply_range(isl_union_map_copy(data->expansion),
2946
1
          umap);
2947
1
  upma = isl_union_pw_multi_aff_from_union_map(umap);
2948
1
  contraction = isl_schedule_tree_expansion_get_contraction(tree);
2949
1
  contraction = isl_union_pw_multi_aff_intersect_domain(contraction,
2950
1
                domain);
2951
1
  domain = isl_union_pw_multi_aff_domain(
2952
1
        isl_union_pw_multi_aff_copy(upma));
2953
1
  upma = isl_union_pw_multi_aff_gist(upma, domain);
2954
1
  contraction = isl_union_pw_multi_aff_union_add(contraction, upma);
2955
1
  tree = isl_schedule_tree_expansion_set_contraction_and_expansion(tree,
2956
1
              contraction, expansion);
2957
1
2958
1
  return tree;
2959
1
}
2960
2961
/* Update the tree root "tree" to refer to the group instances
2962
 * in data->group rather than the original domain elements in data->domain.
2963
 * "pos" is the position in the original schedule tree where the modified
2964
 * "tree" will be attached.
2965
 *
2966
 * If we have come across a domain or expansion node before (data->finished
2967
 * is set), then we no longer need perform any modifications.
2968
 *
2969
 * If "tree" is a filter, then we add data->group_universe to the filter.
2970
 * We also remove data->domain_universe from the filter if all the domain
2971
 * elements in this universe that reach the filter node are part of
2972
 * the elements that are being grouped by data->expansion.
2973
 * If "tree" is a band, domain or expansion, then it is handled
2974
 * in a separate function.
2975
 */
2976
static __isl_give isl_schedule_tree *group_ancestor(
2977
  __isl_take isl_schedule_tree *tree, __isl_keep isl_schedule_node *pos,
2978
  void *user)
2979
9
{
2980
9
  struct isl_schedule_group_data *data = user;
2981
9
  isl_union_set *domain;
2982
9
  int is_covered;
2983
9
2984
9
  if (!tree || !pos)
2985
0
    return isl_schedule_tree_free(tree);
2986
9
2987
9
  if (data->finished)
2988
2
    return tree;
2989
7
2990
7
  switch (isl_schedule_tree_get_type(tree)) {
2991
7
  case isl_schedule_node_error:
2992
0
    return isl_schedule_tree_free(tree);
2993
7
  case isl_schedule_node_extension:
2994
0
    isl_die(isl_schedule_tree_get_ctx(tree), isl_error_unsupported,
2995
0
      "grouping not allowed in extended tree",
2996
0
      return isl_schedule_tree_free(tree));
2997
2
  case isl_schedule_node_band:
2998
2
    tree = group_band(tree, pos, data);
2999
2
    break;
3000
0
  case isl_schedule_node_context:
3001
0
    tree = group_context(tree, pos, data);
3002
0
    break;
3003
2
  case isl_schedule_node_domain:
3004
2
    tree = group_domain(tree, pos, data);
3005
2
    data->finished = 1;
3006
2
    break;
3007
1
  case isl_schedule_node_filter:
3008
1
    domain = isl_schedule_node_get_domain(pos);
3009
1
    is_covered = locally_covered_by_domain(domain, data);
3010
1
    isl_union_set_free(domain);
3011
1
    if (is_covered < 0)
3012
0
      return isl_schedule_tree_free(tree);
3013
1
    domain = isl_schedule_tree_filter_get_filter(tree);
3014
1
    if (is_covered)
3015
1
      domain = isl_union_set_subtract(domain,
3016
1
            isl_union_set_copy(data->domain_universe));
3017
1
    domain = isl_union_set_union(domain,
3018
1
            isl_union_set_copy(data->group_universe));
3019
1
    tree = isl_schedule_tree_filter_set_filter(tree, domain);
3020
1
    break;
3021
1
  case isl_schedule_node_expansion:
3022
1
    tree = group_expansion(tree, pos, data);
3023
1
    data->finished = 1;
3024
1
    break;
3025
1
  case isl_schedule_node_leaf:
3026
1
  case isl_schedule_node_guard:
3027
1
  case isl_schedule_node_mark:
3028
1
  case isl_schedule_node_sequence:
3029
1
  case isl_schedule_node_set:
3030
1
    break;
3031
7
  }
3032
7
3033
7
  return tree;
3034
7
}
3035
3036
/* Group the domain elements that reach "node" into instances
3037
 * of a single statement with identifier "group_id".
3038
 * In particular, group the domain elements according to their
3039
 * prefix schedule.
3040
 *
3041
 * That is, introduce an expansion node with as contraction
3042
 * the prefix schedule (with the target space replaced by "group_id")
3043
 * and as expansion the inverse of this contraction (with its range
3044
 * intersected with the domain elements that reach "node").
3045
 * The outer nodes are then modified to refer to the group instances
3046
 * instead of the original domain elements.
3047
 *
3048
 * No instance of "group_id" is allowed to reach "node" prior
3049
 * to the grouping.
3050
 * No ancestor of "node" is allowed to be an extension node.
3051
 *
3052
 * Return a pointer to original node in tree, i.e., the child
3053
 * of the newly introduced expansion node.
3054
 */
3055
__isl_give isl_schedule_node *isl_schedule_node_group(
3056
  __isl_take isl_schedule_node *node, __isl_take isl_id *group_id)
3057
3
{
3058
3
  struct isl_schedule_group_data data = { 0 };
3059
3
  isl_space *space;
3060
3
  isl_union_set *domain;
3061
3
  isl_union_pw_multi_aff *contraction;
3062
3
  isl_union_map *expansion;
3063
3
  int disjoint;
3064
3
3065
3
  if (!node || !group_id)
3066
0
    goto error;
3067
3
  if (check_insert(node) < 0)
3068
0
    goto error;
3069
3
3070
3
  domain = isl_schedule_node_get_domain(node);
3071
3
  data.domain = isl_union_set_copy(domain);
3072
3
  data.domain_universe = isl_union_set_copy(domain);
3073
3
  data.domain_universe = isl_union_set_universe(data.domain_universe);
3074
3
3075
3
  data.dim = isl_schedule_node_get_schedule_depth(node);
3076
3
  if (data.dim == 0) {
3077
0
    isl_ctx *ctx;
3078
0
    isl_set *set;
3079
0
    isl_union_set *group;
3080
0
    isl_union_map *univ;
3081
0
3082
0
    ctx = isl_schedule_node_get_ctx(node);
3083
0
    space = isl_space_set_alloc(ctx, 0, 0);
3084
0
    space = isl_space_set_tuple_id(space, isl_dim_set, group_id);
3085
0
    set = isl_set_universe(isl_space_copy(space));
3086
0
    group = isl_union_set_from_set(set);
3087
0
    expansion = isl_union_map_from_domain_and_range(domain, group);
3088
0
    univ = isl_union_map_universe(isl_union_map_copy(expansion));
3089
0
    contraction = isl_union_pw_multi_aff_from_union_map(univ);
3090
0
    expansion = isl_union_map_reverse(expansion);
3091
3
  } else {
3092
3
    isl_multi_union_pw_aff *prefix;
3093
3
    isl_union_set *univ;
3094
3
3095
3
    prefix =
3096
3
    isl_schedule_node_get_prefix_schedule_multi_union_pw_aff(node);
3097
3
    prefix = isl_multi_union_pw_aff_set_tuple_id(prefix,
3098
3
              isl_dim_set, group_id);
3099
3
    space = isl_multi_union_pw_aff_get_space(prefix);
3100
3
    contraction = isl_union_pw_multi_aff_from_multi_union_pw_aff(
3101
3
              prefix);
3102
3
    univ = isl_union_set_universe(isl_union_set_copy(domain));
3103
3
    contraction =
3104
3
        isl_union_pw_multi_aff_intersect_domain(contraction, univ);
3105
3
    expansion = isl_union_map_from_union_pw_multi_aff(
3106
3
            isl_union_pw_multi_aff_copy(contraction));
3107
3
    expansion = isl_union_map_reverse(expansion);
3108
3
    expansion = isl_union_map_intersect_range(expansion, domain);
3109
3
  }
3110
3
  space = isl_space_map_from_set(space);
3111
3
  data.sched = isl_multi_aff_identity(space);
3112
3
  data.group = isl_union_map_domain(isl_union_map_copy(expansion));
3113
3
  data.group = isl_union_set_coalesce(data.group);
3114
3
  data.group_universe = isl_union_set_copy(data.group);
3115
3
  data.group_universe = isl_union_set_universe(data.group_universe);
3116
3
  data.expansion = isl_union_map_copy(expansion);
3117
3
  data.contraction = isl_union_pw_multi_aff_copy(contraction);
3118
3
  node = isl_schedule_node_insert_expansion(node, contraction, expansion);
3119
3
3120
3
  disjoint = isl_union_set_is_disjoint(data.domain_universe,
3121
3
              data.group_universe);
3122
3
3123
3
  node = update_ancestors(node, &group_ancestor, &data);
3124
3
3125
3
  isl_union_set_free(data.domain);
3126
3
  isl_union_set_free(data.domain_universe);
3127
3
  isl_union_set_free(data.group);
3128
3
  isl_union_set_free(data.group_universe);
3129
3
  isl_multi_aff_free(data.sched);
3130
3
  isl_union_map_free(data.expansion);
3131
3
  isl_union_pw_multi_aff_free(data.contraction);
3132
3
3133
3
  node = isl_schedule_node_child(node, 0);
3134
3
3135
3
  if (!node || disjoint < 0)
3136
0
    return isl_schedule_node_free(node);
3137
3
  if (!disjoint)
3138
3
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
3139
3
      "group instances already reach node",
3140
3
      return isl_schedule_node_free(node));
3141
3
3142
3
  return node;
3143
0
error:
3144
0
  isl_schedule_node_free(node);
3145
0
  isl_id_free(group_id);
3146
0
  return NULL;
3147
3
}
3148
3149
/* Compute the gist of the given band node with respect to "context".
3150
 */
3151
__isl_give isl_schedule_node *isl_schedule_node_band_gist(
3152
  __isl_take isl_schedule_node *node, __isl_take isl_union_set *context)
3153
235
{
3154
235
  isl_schedule_tree *tree;
3155
235
3156
235
  tree = isl_schedule_node_get_tree(node);
3157
235
  tree = isl_schedule_tree_band_gist(tree, context);
3158
235
  return isl_schedule_node_graft_tree(node, tree);
3159
235
}
3160
3161
/* Internal data structure for isl_schedule_node_gist.
3162
 * "n_expansion" is the number of outer expansion nodes
3163
 * with respect to the current position
3164
 * "filters" contains an element for each outer filter, expansion or
3165
 * extension node with respect to the current position, each representing
3166
 * the intersection of the previous element and the filter on the filter node
3167
 * or the expansion/extension of the previous element.
3168
 * The first element in the original context passed to isl_schedule_node_gist.
3169
 */
3170
struct isl_node_gist_data {
3171
  int n_expansion;
3172
  isl_union_set_list *filters;
3173
};
3174
3175
/* Enter the expansion node "node" during a isl_schedule_node_gist traversal.
3176
 *
3177
 * In particular, add an extra element to data->filters containing
3178
 * the expansion of the previous element and replace the expansion
3179
 * and contraction on "node" by the gist with respect to these filters.
3180
 * Also keep track of the fact that we have entered another expansion.
3181
 */
3182
static __isl_give isl_schedule_node *gist_enter_expansion(
3183
  __isl_take isl_schedule_node *node, struct isl_node_gist_data *data)
3184
0
{
3185
0
  int n;
3186
0
  isl_union_set *inner;
3187
0
  isl_union_map *expansion;
3188
0
  isl_union_pw_multi_aff *contraction;
3189
0
3190
0
  data->n_expansion++;
3191
0
3192
0
  n = isl_union_set_list_n_union_set(data->filters);
3193
0
  inner = isl_union_set_list_get_union_set(data->filters, n - 1);
3194
0
  expansion = isl_schedule_node_expansion_get_expansion(node);
3195
0
  inner = isl_union_set_apply(inner, expansion);
3196
0
3197
0
  contraction = isl_schedule_node_expansion_get_contraction(node);
3198
0
  contraction = isl_union_pw_multi_aff_gist(contraction,
3199
0
            isl_union_set_copy(inner));
3200
0
3201
0
  data->filters = isl_union_set_list_add(data->filters, inner);
3202
0
3203
0
  inner = isl_union_set_list_get_union_set(data->filters, n - 1);
3204
0
  expansion = isl_schedule_node_expansion_get_expansion(node);
3205
0
  expansion = isl_union_map_gist_domain(expansion, inner);
3206
0
  node = isl_schedule_node_expansion_set_contraction_and_expansion(node,
3207
0
            contraction, expansion);
3208
0
3209
0
  return node;
3210
0
}
3211
3212
/* Leave the expansion node "node" during a isl_schedule_node_gist traversal.
3213
 *
3214
 * In particular, remove the element in data->filters that was added by
3215
 * gist_enter_expansion and decrement the number of outer expansions.
3216
 *
3217
 * The expansion has already been simplified in gist_enter_expansion.
3218
 * If this simplification results in an identity expansion, then
3219
 * it is removed here.
3220
 */
3221
static __isl_give isl_schedule_node *gist_leave_expansion(
3222
  __isl_take isl_schedule_node *node, struct isl_node_gist_data *data)
3223
0
{
3224
0
  int n;
3225
0
  isl_bool identity;
3226
0
  isl_union_map *expansion;
3227
0
3228
0
  expansion = isl_schedule_node_expansion_get_expansion(node);
3229
0
  identity = isl_union_map_is_identity(expansion);
3230
0
  isl_union_map_free(expansion);
3231
0
3232
0
  if (identity < 0)
3233
0
    node = isl_schedule_node_free(node);
3234
0
  else if (identity)
3235
0
    node = isl_schedule_node_delete(node);
3236
0
3237
0
  n = isl_union_set_list_n_union_set(data->filters);
3238
0
  data->filters = isl_union_set_list_drop(data->filters, n - 1, 1);
3239
0
3240
0
  data->n_expansion--;
3241
0
3242
0
  return node;
3243
0
}
3244
3245
/* Enter the extension node "node" during a isl_schedule_node_gist traversal.
3246
 *
3247
 * In particular, add an extra element to data->filters containing
3248
 * the union of the previous element with the additional domain elements
3249
 * introduced by the extension.
3250
 */
3251
static __isl_give isl_schedule_node *gist_enter_extension(
3252
  __isl_take isl_schedule_node *node, struct isl_node_gist_data *data)
3253
0
{
3254
0
  int n;
3255
0
  isl_union_set *inner, *extra;
3256
0
  isl_union_map *extension;
3257
0
3258
0
  n = isl_union_set_list_n_union_set(data->filters);
3259
0
  inner = isl_union_set_list_get_union_set(data->filters, n - 1);
3260
0
  extension = isl_schedule_node_extension_get_extension(node);
3261
0
  extra = isl_union_map_range(extension);
3262
0
  inner = isl_union_set_union(inner, extra);
3263
0
3264
0
  data->filters = isl_union_set_list_add(data->filters, inner);
3265
0
3266
0
  return node;
3267
0
}
3268
3269
/* Can we finish gisting at this node?
3270
 * That is, is the filter on the current filter node a subset of
3271
 * the original context passed to isl_schedule_node_gist?
3272
 * If we have gone through any expansions, then we cannot perform
3273
 * this test since the current domain elements are incomparable
3274
 * to the domain elements in the original context.
3275
 */
3276
static int gist_done(__isl_keep isl_schedule_node *node,
3277
  struct isl_node_gist_data *data)
3278
918
{
3279
918
  isl_union_set *filter, *outer;
3280
918
  int subset;
3281
918
3282
918
  if (data->n_expansion != 0)
3283
0
    return 0;
3284
918
3285
918
  filter = isl_schedule_node_filter_get_filter(node);
3286
918
  outer = isl_union_set_list_get_union_set(data->filters, 0);
3287
918
  subset = isl_union_set_is_subset(filter, outer);
3288
918
  isl_union_set_free(outer);
3289
918
  isl_union_set_free(filter);
3290
918
3291
918
  return subset;
3292
918
}
3293
3294
/* Callback for "traverse" to enter a node and to move
3295
 * to the deepest initial subtree that should be traversed
3296
 * by isl_schedule_node_gist.
3297
 *
3298
 * The "filters" list is extended by one element each time
3299
 * we come across a filter node by the result of intersecting
3300
 * the last element in the list with the filter on the filter node.
3301
 *
3302
 * If the filter on the current filter node is a subset of
3303
 * the original context passed to isl_schedule_node_gist,
3304
 * then there is no need to go into its subtree since it cannot
3305
 * be further simplified by the context.  The "filters" list is
3306
 * still extended for consistency, but the actual value of the
3307
 * added element is immaterial since it will not be used.
3308
 *
3309
 * Otherwise, the filter on the current filter node is replaced by
3310
 * the gist of the original filter with respect to the intersection
3311
 * of the original context with the intermediate filters.
3312
 *
3313
 * If the new element in the "filters" list is empty, then no elements
3314
 * can reach the descendants of the current filter node.  The subtree
3315
 * underneath the filter node is therefore removed.
3316
 *
3317
 * Each expansion node we come across is handled by
3318
 * gist_enter_expansion.
3319
 *
3320
 * Each extension node we come across is handled by
3321
 * gist_enter_extension.
3322
 */
3323
static __isl_give isl_schedule_node *gist_enter(
3324
  __isl_take isl_schedule_node *node, void *user)
3325
1.06k
{
3326
1.06k
  struct isl_node_gist_data *data = user;
3327
1.06k
3328
2.03k
  do {
3329
2.03k
    isl_union_set *filter, *inner;
3330
2.03k
    int done, empty;
3331
2.03k
    int n;
3332
2.03k
3333
2.03k
    switch (isl_schedule_node_get_type(node)) {
3334
2.03k
    case isl_schedule_node_error:
3335
0
      return isl_schedule_node_free(node);
3336
2.03k
    case isl_schedule_node_expansion:
3337
0
      node = gist_enter_expansion(node, data);
3338
0
      continue;
3339
2.03k
    case isl_schedule_node_extension:
3340
0
      node = gist_enter_extension(node, data);
3341
0
      continue;
3342
2.03k
    case isl_schedule_node_band:
3343
1.11k
    case isl_schedule_node_context:
3344
1.11k
    case isl_schedule_node_domain:
3345
1.11k
    case isl_schedule_node_guard:
3346
1.11k
    case isl_schedule_node_leaf:
3347
1.11k
    case isl_schedule_node_mark:
3348
1.11k
    case isl_schedule_node_sequence:
3349
1.11k
    case isl_schedule_node_set:
3350
1.11k
      continue;
3351
1.11k
    case isl_schedule_node_filter:
3352
918
      break;
3353
918
    }
3354
918
    done = gist_done(node, data);
3355
918
    filter = isl_schedule_node_filter_get_filter(node);
3356
918
    if (done < 0 || done) {
3357
59
      data->filters = isl_union_set_list_add(data->filters,
3358
59
                filter);
3359
59
      if (done < 0)
3360
0
        return isl_schedule_node_free(node);
3361
59
      return node;
3362
59
    }
3363
859
    n = isl_union_set_list_n_union_set(data->filters);
3364
859
    inner = isl_union_set_list_get_union_set(data->filters, n - 1);
3365
859
    filter = isl_union_set_gist(filter, isl_union_set_copy(inner));
3366
859
    node = isl_schedule_node_filter_set_filter(node,
3367
859
            isl_union_set_copy(filter));
3368
859
    filter = isl_union_set_intersect(filter, inner);
3369
859
    empty = isl_union_set_is_empty(filter);
3370
859
    data->filters = isl_union_set_list_add(data->filters, filter);
3371
859
    if (empty < 0)
3372
0
      return isl_schedule_node_free(node);
3373
859
    if (!empty)
3374
523
      continue;
3375
336
    node = isl_schedule_node_child(node, 0);
3376
336
    node = isl_schedule_node_cut(node);
3377
336
    node = isl_schedule_node_parent(node);
3378
336
    return node;
3379
1.63k
  } while (isl_schedule_node_has_children(node) &&
3380
1.63k
    
(node = isl_schedule_node_first_child(node)) != NULL970
);
3381
1.06k
3382
1.06k
  
return node665
;
3383
1.06k
}
3384
3385
/* Callback for "traverse" to leave a node for isl_schedule_node_gist.
3386
 *
3387
 * In particular, if the current node is a filter node, then we remove
3388
 * the element on the "filters" list that was added when we entered
3389
 * the node.  There is no need to compute any gist here, since we
3390
 * already did that when we entered the node.
3391
 *
3392
 * Expansion nodes are handled by gist_leave_expansion.
3393
 *
3394
 * If the current node is an extension, then remove the element
3395
 * in data->filters that was added by gist_enter_extension.
3396
 *
3397
 * If the current node is a band node, then we compute the gist of
3398
 * the band node with respect to the intersection of the original context
3399
 * and the intermediate filters.
3400
 *
3401
 * If the current node is a sequence or set node, then some of
3402
 * the filter children may have become empty and so they are removed.
3403
 * If only one child is left, then the set or sequence node along with
3404
 * the single remaining child filter is removed.  The filter can be
3405
 * removed because the filters on a sequence or set node are supposed
3406
 * to partition the incoming domain instances.
3407
 * In principle, it should then be impossible for there to be zero
3408
 * remaining children, but should this happen, we replace the entire
3409
 * subtree with an empty filter.
3410
 */
3411
static __isl_give isl_schedule_node *gist_leave(
3412
  __isl_take isl_schedule_node *node, void *user)
3413
2.03k
{
3414
2.03k
  struct isl_node_gist_data *data = user;
3415
2.03k
  isl_schedule_tree *tree;
3416
2.03k
  int i, n;
3417
2.03k
  isl_union_set *filter;
3418
2.03k
3419
2.03k
  switch (isl_schedule_node_get_type(node)) {
3420
2.03k
  case isl_schedule_node_error:
3421
0
    return isl_schedule_node_free(node);
3422
2.03k
  case isl_schedule_node_expansion:
3423
0
    node = gist_leave_expansion(node, data);
3424
0
    break;
3425
2.03k
  case isl_schedule_node_extension:
3426
918
  case isl_schedule_node_filter:
3427
918
    n = isl_union_set_list_n_union_set(data->filters);
3428
918
    data->filters = isl_union_set_list_drop(data->filters,
3429
918
              n - 1, 1);
3430
918
    break;
3431
918
  case isl_schedule_node_band:
3432
235
    n = isl_union_set_list_n_union_set(data->filters);
3433
235
    filter = isl_union_set_list_get_union_set(data->filters, n - 1);
3434
235
    node = isl_schedule_node_band_gist(node, filter);
3435
235
    break;
3436
918
  case isl_schedule_node_set:
3437
212
  case isl_schedule_node_sequence:
3438
212
    tree = isl_schedule_node_get_tree(node);
3439
212
    n = isl_schedule_tree_n_children(tree);
3440
1.13k
    for (i = n - 1; i >= 0; 
--i918
) {
3441
918
      isl_schedule_tree *child;
3442
918
      isl_union_set *filter;
3443
918
      int empty;
3444
918
3445
918
      child = isl_schedule_tree_get_child(tree, i);
3446
918
      filter = isl_schedule_tree_filter_get_filter(child);
3447
918
      empty = isl_union_set_is_empty(filter);
3448
918
      isl_union_set_free(filter);
3449
918
      isl_schedule_tree_free(child);
3450
918
      if (empty < 0)
3451
0
        tree = isl_schedule_tree_free(tree);
3452
918
      else if (empty)
3453
336
        tree = isl_schedule_tree_drop_child(tree, i);
3454
918
    }
3455
212
    n = isl_schedule_tree_n_children(tree);
3456
212
    node = isl_schedule_node_graft_tree(node, tree);
3457
212
    if (n == 1) {
3458
133
      node = isl_schedule_node_delete(node);
3459
133
      node = isl_schedule_node_delete(node);
3460
133
    } else 
if (79
n == 079
) {
3461
0
      isl_space *space;
3462
0
3463
0
      filter =
3464
0
          isl_union_set_list_get_union_set(data->filters, 0);
3465
0
      space = isl_union_set_get_space(filter);
3466
0
      isl_union_set_free(filter);
3467
0
      filter = isl_union_set_empty(space);
3468
0
      node = isl_schedule_node_cut(node);
3469
0
      node = isl_schedule_node_insert_filter(node, filter);
3470
0
    }
3471
212
    break;
3472
665
  case isl_schedule_node_context:
3473
665
  case isl_schedule_node_domain:
3474
665
  case isl_schedule_node_guard:
3475
665
  case isl_schedule_node_leaf:
3476
665
  case isl_schedule_node_mark:
3477
665
    break;
3478
2.03k
  }
3479
2.03k
3480
2.03k
  return node;
3481
2.03k
}
3482
3483
/* Compute the gist of the subtree at "node" with respect to
3484
 * the reaching domain elements in "context".
3485
 * In particular, compute the gist of all band and filter nodes
3486
 * in the subtree with respect to "context".  Children of set or sequence
3487
 * nodes that end up with an empty filter are removed completely.
3488
 *
3489
 * We keep track of the intersection of "context" with all outer filters
3490
 * of the current node within the subtree in the final element of "filters".
3491
 * Initially, this list contains the single element "context" and it is
3492
 * extended or shortened each time we enter or leave a filter node.
3493
 */
3494
__isl_give isl_schedule_node *isl_schedule_node_gist(
3495
  __isl_take isl_schedule_node *node, __isl_take isl_union_set *context)
3496
354
{
3497
354
  struct isl_node_gist_data data;
3498
354
3499
354
  data.n_expansion = 0;
3500
354
  data.filters = isl_union_set_list_from_union_set(context);
3501
354
  node = traverse(node, &gist_enter, &gist_leave, &data);
3502
354
  isl_union_set_list_free(data.filters);
3503
354
  return node;
3504
354
}
3505
3506
/* Intersect the domain of domain node "node" with "domain".
3507
 *
3508
 * If the domain of "node" is already a subset of "domain",
3509
 * then nothing needs to be changed.
3510
 *
3511
 * Otherwise, we replace the domain of the domain node by the intersection
3512
 * and simplify the subtree rooted at "node" with respect to this intersection.
3513
 */
3514
__isl_give isl_schedule_node *isl_schedule_node_domain_intersect_domain(
3515
  __isl_take isl_schedule_node *node, __isl_take isl_union_set *domain)
3516
3.07k
{
3517
3.07k
  isl_schedule_tree *tree;
3518
3.07k
  isl_union_set *uset;
3519
3.07k
  int is_subset;
3520
3.07k
3521
3.07k
  if (!node || !domain)
3522
0
    goto error;
3523
3.07k
3524
3.07k
  uset = isl_schedule_tree_domain_get_domain(node->tree);
3525
3.07k
  is_subset = isl_union_set_is_subset(uset, domain);
3526
3.07k
  isl_union_set_free(uset);
3527
3.07k
  if (is_subset < 0)
3528
0
    goto error;
3529
3.07k
  if (is_subset) {
3530
2.89k
    isl_union_set_free(domain);
3531
2.89k
    return node;
3532
2.89k
  }
3533
182
3534
182
  tree = isl_schedule_tree_copy(node->tree);
3535
182
  uset = isl_schedule_tree_domain_get_domain(tree);
3536
182
  uset = isl_union_set_intersect(uset, domain);
3537
182
  tree = isl_schedule_tree_domain_set_domain(tree,
3538
182
                isl_union_set_copy(uset));
3539
182
  node = isl_schedule_node_graft_tree(node, tree);
3540
182
3541
182
  node = isl_schedule_node_child(node, 0);
3542
182
  node = isl_schedule_node_gist(node, uset);
3543
182
  node = isl_schedule_node_parent(node);
3544
182
3545
182
  return node;
3546
0
error:
3547
0
  isl_schedule_node_free(node);
3548
0
  isl_union_set_free(domain);
3549
0
  return NULL;
3550
3.07k
}
3551
3552
/* Replace the domain of domain node "node" with the gist
3553
 * of the original domain with respect to the parameter domain "context".
3554
 */
3555
__isl_give isl_schedule_node *isl_schedule_node_domain_gist_params(
3556
  __isl_take isl_schedule_node *node, __isl_take isl_set *context)
3557
1.13k
{
3558
1.13k
  isl_union_set *domain;
3559
1.13k
  isl_schedule_tree *tree;
3560
1.13k
3561
1.13k
  if (!node || !context)
3562
0
    goto error;
3563
1.13k
3564
1.13k
  tree = isl_schedule_tree_copy(node->tree);
3565
1.13k
  domain = isl_schedule_tree_domain_get_domain(node->tree);
3566
1.13k
  domain = isl_union_set_gist_params(domain, context);
3567
1.13k
  tree = isl_schedule_tree_domain_set_domain(tree, domain);
3568
1.13k
  node = isl_schedule_node_graft_tree(node, tree);
3569
1.13k
3570
1.13k
  return node;
3571
0
error:
3572
0
  isl_schedule_node_free(node);
3573
0
  isl_set_free(context);
3574
0
  return NULL;
3575
1.13k
}
3576
3577
/* Internal data structure for isl_schedule_node_get_subtree_expansion.
3578
 * "expansions" contains a list of accumulated expansions
3579
 * for each outer expansion, set or sequence node.  The first element
3580
 * in the list is an identity mapping on the reaching domain elements.
3581
 * "res" collects the results.
3582
 */
3583
struct isl_subtree_expansion_data {
3584
  isl_union_map_list *expansions;
3585
  isl_union_map *res;
3586
};
3587
3588
/* Callback for "traverse" to enter a node and to move
3589
 * to the deepest initial subtree that should be traversed
3590
 * by isl_schedule_node_get_subtree_expansion.
3591
 *
3592
 * Whenever we come across an expansion node, the last element
3593
 * of data->expansions is combined with the expansion
3594
 * on the expansion node.
3595
 *
3596
 * Whenever we come across a filter node that is the child
3597
 * of a set or sequence node, data->expansions is extended
3598
 * with a new element that restricts the previous element
3599
 * to the elements selected by the filter.
3600
 * The previous element can then be reused while backtracking.
3601
 */
3602
static __isl_give isl_schedule_node *subtree_expansion_enter(
3603
  __isl_take isl_schedule_node *node, void *user)
3604
3
{
3605
3
  struct isl_subtree_expansion_data *data = user;
3606
3
3607
12
  do {
3608
12
    enum isl_schedule_node_type type;
3609
12
    isl_union_set *filter;
3610
12
    isl_union_map *inner, *expansion;
3611
12
    int n;
3612
12
3613
12
    switch (isl_schedule_node_get_type(node)) {
3614
12
    case isl_schedule_node_error:
3615
0
      return isl_schedule_node_free(node);
3616
12
    case isl_schedule_node_filter:
3617
4
      type = isl_schedule_node_get_parent_type(node);
3618
4
      if (type != isl_schedule_node_set &&
3619
4
          type != isl_schedule_node_sequence)
3620
0
        break;
3621
4
      filter = isl_schedule_node_filter_get_filter(node);
3622
4
      n = isl_union_map_list_n_union_map(data->expansions);
3623
4
      inner =
3624
4
          isl_union_map_list_get_union_map(data->expansions,
3625
4
                n - 1);
3626
4
      inner = isl_union_map_intersect_range(inner, filter);
3627
4
      data->expansions =
3628
4
          isl_union_map_list_add(data->expansions, inner);
3629
4
      break;
3630
4
    case isl_schedule_node_expansion:
3631
2
      n = isl_union_map_list_n_union_map(data->expansions);
3632
2
      expansion =
3633
2
        isl_schedule_node_expansion_get_expansion(node);
3634
2
      inner =
3635
2
          isl_union_map_list_get_union_map(data->expansions,
3636
2
                n - 1);
3637
2
      inner = isl_union_map_apply_range(inner, expansion);
3638
2
      data->expansions =
3639
2
          isl_union_map_list_set_union_map(data->expansions,
3640
2
                n - 1, inner);
3641
2
      break;
3642
6
    case isl_schedule_node_band:
3643
6
    case isl_schedule_node_context:
3644
6
    case isl_schedule_node_domain:
3645
6
    case isl_schedule_node_extension:
3646
6
    case isl_schedule_node_guard:
3647
6
    case isl_schedule_node_leaf:
3648
6
    case isl_schedule_node_mark:
3649
6
    case isl_schedule_node_sequence:
3650
6
    case isl_schedule_node_set:
3651
6
      break;
3652
12
    }
3653
12
  } while (isl_schedule_node_has_children(node) &&
3654
12
    
(node = isl_schedule_node_first_child(node)) != NULL9
);
3655
3
3656
3
  return node;
3657
3
}
3658
3659
/* Callback for "traverse" to leave a node for
3660
 * isl_schedule_node_get_subtree_expansion.
3661
 *
3662
 * If we come across a filter node that is the child
3663
 * of a set or sequence node, then we remove the element
3664
 * of data->expansions that was added in subtree_expansion_enter.
3665
 *
3666
 * If we reach a leaf node, then the accumulated expansion is
3667
 * added to data->res.
3668
 */
3669
static __isl_give isl_schedule_node *subtree_expansion_leave(
3670
  __isl_take isl_schedule_node *node, void *user)
3671
12
{
3672
12
  struct isl_subtree_expansion_data *data = user;
3673
12
  int n;
3674
12
  isl_union_map *inner;
3675
12
  enum isl_schedule_node_type type;
3676
12
3677
12
  switch (isl_schedule_node_get_type(node)) {
3678
12
  case isl_schedule_node_error:
3679
0
    return isl_schedule_node_free(node);
3680
12
  case isl_schedule_node_filter:
3681
4
    type = isl_schedule_node_get_parent_type(node);
3682
4
    if (type != isl_schedule_node_set &&
3683
4
        type != isl_schedule_node_sequence)
3684
0
      break;
3685
4
    n = isl_union_map_list_n_union_map(data->expansions);
3686
4
    data->expansions = isl_union_map_list_drop(data->expansions,
3687
4
              n - 1, 1);
3688
4
    break;
3689
4
  case isl_schedule_node_leaf:
3690
3
    n = isl_union_map_list_n_union_map(data->expansions);
3691
3
    inner = isl_union_map_list_get_union_map(data->expansions,
3692
3
              n - 1);
3693
3
    data->res = isl_union_map_union(data->res, inner);
3694
3
    break;
3695
5
  case isl_schedule_node_band:
3696
5
  case isl_schedule_node_context:
3697
5
  case isl_schedule_node_domain:
3698
5
  case isl_schedule_node_expansion:
3699
5
  case isl_schedule_node_extension:
3700
5
  case isl_schedule_node_guard:
3701
5
  case isl_schedule_node_mark:
3702
5
  case isl_schedule_node_sequence:
3703
5
  case isl_schedule_node_set:
3704
5
    break;
3705
12
  }
3706
12
3707
12
  return node;
3708
12
}
3709
3710
/* Return a mapping from the domain elements that reach "node"
3711
 * to the corresponding domain elements in the leaves of the subtree
3712
 * rooted at "node" obtained by composing the intermediate expansions.
3713
 *
3714
 * We start out with an identity mapping between the domain elements
3715
 * that reach "node" and compose it with all the expansions
3716
 * on a path from "node" to a leaf while traversing the subtree.
3717
 * Within the children of an a sequence or set node, the
3718
 * accumulated expansion is restricted to the elements selected
3719
 * by the filter child.
3720
 */
3721
__isl_give isl_union_map *isl_schedule_node_get_subtree_expansion(
3722
  __isl_keep isl_schedule_node *node)
3723
1
{
3724
1
  struct isl_subtree_expansion_data data;
3725
1
  isl_space *space;
3726
1
  isl_union_set *domain;
3727
1
  isl_union_map *expansion;
3728
1
3729
1
  if (!node)
3730
0
    return NULL;
3731
1
3732
1
  domain = isl_schedule_node_get_universe_domain(node);
3733
1
  space = isl_union_set_get_space(domain);
3734
1
  expansion = isl_union_set_identity(domain);
3735
1
  data.res = isl_union_map_empty(space);
3736
1
  data.expansions = isl_union_map_list_from_union_map(expansion);
3737
1
3738
1
  node = isl_schedule_node_copy(node);
3739
1
  node = traverse(node, &subtree_expansion_enter,
3740
1
      &subtree_expansion_leave, &data);
3741
1
  if (!node)
3742
0
    data.res = isl_union_map_free(data.res);
3743
1
  isl_schedule_node_free(node);
3744
1
3745
1
  isl_union_map_list_free(data.expansions);
3746
1
3747
1
  return data.res;
3748
1
}
3749
3750
/* Internal data structure for isl_schedule_node_get_subtree_contraction.
3751
 * "contractions" contains a list of accumulated contractions
3752
 * for each outer expansion, set or sequence node.  The first element
3753
 * in the list is an identity mapping on the reaching domain elements.
3754
 * "res" collects the results.
3755
 */
3756
struct isl_subtree_contraction_data {
3757
  isl_union_pw_multi_aff_list *contractions;
3758
  isl_union_pw_multi_aff *res;
3759
};
3760
3761
/* Callback for "traverse" to enter a node and to move
3762
 * to the deepest initial subtree that should be traversed
3763
 * by isl_schedule_node_get_subtree_contraction.
3764
 *
3765
 * Whenever we come across an expansion node, the last element
3766
 * of data->contractions is combined with the contraction
3767
 * on the expansion node.
3768
 *
3769
 * Whenever we come across a filter node that is the child
3770
 * of a set or sequence node, data->contractions is extended
3771
 * with a new element that restricts the previous element
3772
 * to the elements selected by the filter.
3773
 * The previous element can then be reused while backtracking.
3774
 */
3775
static __isl_give isl_schedule_node *subtree_contraction_enter(
3776
  __isl_take isl_schedule_node *node, void *user)
3777
7
{
3778
7
  struct isl_subtree_contraction_data *data = user;
3779
7
3780
21
  do {
3781
21
    enum isl_schedule_node_type type;
3782
21
    isl_union_set *filter;
3783
21
    isl_union_pw_multi_aff *inner, *contraction;
3784
21
    int n;
3785
21
3786
21
    switch (isl_schedule_node_get_type(node)) {
3787
21
    case isl_schedule_node_error:
3788
0
      return isl_schedule_node_free(node);
3789
21
    case isl_schedule_node_filter:
3790
0
      type = isl_schedule_node_get_parent_type(node);
3791
0
      if (type != isl_schedule_node_set &&
3792
0
          type != isl_schedule_node_sequence)
3793
0
        break;
3794
0
      filter = isl_schedule_node_filter_get_filter(node);
3795
0
      n = isl_union_pw_multi_aff_list_n_union_pw_multi_aff(
3796
0
            data->contractions);
3797
0
      inner =
3798
0
          isl_union_pw_multi_aff_list_get_union_pw_multi_aff(
3799
0
            data->contractions, n - 1);
3800
0
      inner = isl_union_pw_multi_aff_intersect_domain(inner,
3801
0
                filter);
3802
0
      data->contractions =
3803
0
          isl_union_pw_multi_aff_list_add(data->contractions,
3804
0
                inner);
3805
0
      break;
3806
0
    case isl_schedule_node_expansion:
3807
0
      n = isl_union_pw_multi_aff_list_n_union_pw_multi_aff(
3808
0
            data->contractions);
3809
0
      contraction =
3810
0
          isl_schedule_node_expansion_get_contraction(node);
3811
0
      inner =
3812
0
          isl_union_pw_multi_aff_list_get_union_pw_multi_aff(
3813
0
            data->contractions, n - 1);
3814
0
      inner =
3815
0
          isl_union_pw_multi_aff_pullback_union_pw_multi_aff(
3816
0
            inner, contraction);
3817
0
      data->contractions =
3818
0
          isl_union_pw_multi_aff_list_set_union_pw_multi_aff(
3819
0
          data->contractions, n - 1, inner);
3820
0
      break;
3821
21
    case isl_schedule_node_band:
3822
21
    case isl_schedule_node_context:
3823
21
    case isl_schedule_node_domain:
3824
21
    case isl_schedule_node_extension:
3825
21
    case isl_schedule_node_guard:
3826
21
    case isl_schedule_node_leaf:
3827
21
    case isl_schedule_node_mark:
3828
21
    case isl_schedule_node_sequence:
3829
21
    case isl_schedule_node_set:
3830
21
      break;
3831
21
    }
3832
21
  } while (isl_schedule_node_has_children(node) &&
3833
21
    
(node = isl_schedule_node_first_child(node)) != NULL14
);
3834
7
3835
7
  return node;
3836
7
}
3837
3838
/* Callback for "traverse" to leave a node for
3839
 * isl_schedule_node_get_subtree_contraction.
3840
 *
3841
 * If we come across a filter node that is the child
3842
 * of a set or sequence node, then we remove the element
3843
 * of data->contractions that was added in subtree_contraction_enter.
3844
 *
3845
 * If we reach a leaf node, then the accumulated contraction is
3846
 * added to data->res.
3847
 */
3848
static __isl_give isl_schedule_node *subtree_contraction_leave(
3849
  __isl_take isl_schedule_node *node, void *user)
3850
21
{
3851
21
  struct isl_subtree_contraction_data *data = user;
3852
21
  int n;
3853
21
  isl_union_pw_multi_aff *inner;
3854
21
  enum isl_schedule_node_type type;
3855
21
3856
21
  switch (isl_schedule_node_get_type(node)) {
3857
21
  case isl_schedule_node_error:
3858
0
    return isl_schedule_node_free(node);
3859
21
  case isl_schedule_node_filter:
3860
0
    type = isl_schedule_node_get_parent_type(node);
3861
0
    if (type != isl_schedule_node_set &&
3862
0
        type != isl_schedule_node_sequence)
3863
0
      break;
3864
0
    n = isl_union_pw_multi_aff_list_n_union_pw_multi_aff(
3865
0
            data->contractions);
3866
0
    data->contractions =
3867
0
      isl_union_pw_multi_aff_list_drop(data->contractions,
3868
0
              n - 1, 1);
3869
0
    break;
3870
7
  case isl_schedule_node_leaf:
3871
7
    n = isl_union_pw_multi_aff_list_n_union_pw_multi_aff(
3872
7
            data->contractions);
3873
7
    inner = isl_union_pw_multi_aff_list_get_union_pw_multi_aff(
3874
7
            data->contractions, n - 1);
3875
7
    data->res = isl_union_pw_multi_aff_union_add(data->res, inner);
3876
7
    break;
3877
14
  case isl_schedule_node_band:
3878
14
  case isl_schedule_node_context:
3879
14
  case isl_schedule_node_domain:
3880
14
  case isl_schedule_node_expansion:
3881
14
  case isl_schedule_node_extension:
3882
14
  case isl_schedule_node_guard:
3883
14
  case isl_schedule_node_mark:
3884
14
  case isl_schedule_node_sequence:
3885
14
  case isl_schedule_node_set:
3886
14
    break;
3887
21
  }
3888
21
3889
21
  return node;
3890
21
}
3891
3892
/* Return a mapping from the domain elements in the leaves of the subtree
3893
 * rooted at "node" to the corresponding domain elements that reach "node"
3894
 * obtained by composing the intermediate contractions.
3895
 *
3896
 * We start out with an identity mapping between the domain elements
3897
 * that reach "node" and compose it with all the contractions
3898
 * on a path from "node" to a leaf while traversing the subtree.
3899
 * Within the children of an a sequence or set node, the
3900
 * accumulated contraction is restricted to the elements selected
3901
 * by the filter child.
3902
 */
3903
__isl_give isl_union_pw_multi_aff *isl_schedule_node_get_subtree_contraction(
3904
  __isl_keep isl_schedule_node *node)
3905
7
{
3906
7
  struct isl_subtree_contraction_data data;
3907
7
  isl_space *space;
3908
7
  isl_union_set *domain;
3909
7
  isl_union_pw_multi_aff *contraction;
3910
7
3911
7
  if (!node)
3912
0
    return NULL;
3913
7
3914
7
  domain = isl_schedule_node_get_universe_domain(node);
3915
7
  space = isl_union_set_get_space(domain);
3916
7
  contraction = isl_union_set_identity_union_pw_multi_aff(domain);
3917
7
  data.res = isl_union_pw_multi_aff_empty(space);
3918
7
  data.contractions =
3919
7
      isl_union_pw_multi_aff_list_from_union_pw_multi_aff(contraction);
3920
7
3921
7
  node = isl_schedule_node_copy(node);
3922
7
  node = traverse(node, &subtree_contraction_enter,
3923
7
      &subtree_contraction_leave, &data);
3924
7
  if (!node)
3925
0
    data.res = isl_union_pw_multi_aff_free(data.res);
3926
7
  isl_schedule_node_free(node);
3927
7
3928
7
  isl_union_pw_multi_aff_list_free(data.contractions);
3929
7
3930
7
  return data.res;
3931
7
}
3932
3933
/* Do the nearest "n" ancestors of "node" have the types given in "types"
3934
 * (starting at the parent of "node")?
3935
 */
3936
static int has_ancestors(__isl_keep isl_schedule_node *node,
3937
  int n, enum isl_schedule_node_type *types)
3938
24
{
3939
24
  int i, n_ancestor;
3940
24
3941
24
  if (!node)
3942
0
    return -1;
3943
24
3944
24
  n_ancestor = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
3945
24
  if (n_ancestor < n)
3946
0
    return 0;
3947
24
3948
24
  for (i = 0; i < n; 
++i0
) {
3949
24
    isl_schedule_tree *tree;
3950
24
    int correct_type;
3951
24
3952
24
    tree = isl_schedule_tree_list_get_schedule_tree(node->ancestors,
3953
24
                  n_ancestor - 1 - i);
3954
24
    if (!tree)
3955
0
      return -1;
3956
24
    correct_type = isl_schedule_tree_get_type(tree) == types[i];
3957
24
    isl_schedule_tree_free(tree);
3958
24
    if (!correct_type)
3959
24
      return 0;
3960
24
  }
3961
24
3962
24
  
return 10
;
3963
24
}
3964
3965
/* Given a node "node" that appears in an extension (i.e., it is the child
3966
 * of a filter in a sequence inside an extension node), are the spaces
3967
 * of the extension specified by "extension" disjoint from those
3968
 * of both the original extension and the domain elements that reach
3969
 * that original extension?
3970
 */
3971
static int is_disjoint_extension(__isl_keep isl_schedule_node *node,
3972
  __isl_keep isl_union_map *extension)
3973
0
{
3974
0
  isl_union_map *old;
3975
0
  isl_union_set *domain;
3976
0
  int empty;
3977
0
3978
0
  node = isl_schedule_node_copy(node);
3979
0
  node = isl_schedule_node_parent(node);
3980
0
  node = isl_schedule_node_parent(node);
3981
0
  node = isl_schedule_node_parent(node);
3982
0
  old = isl_schedule_node_extension_get_extension(node);
3983
0
  domain = isl_schedule_node_get_universe_domain(node);
3984
0
  isl_schedule_node_free(node);
3985
0
  old = isl_union_map_universe(old);
3986
0
  domain = isl_union_set_union(domain, isl_union_map_range(old));
3987
0
  extension = isl_union_map_copy(extension);
3988
0
  extension = isl_union_map_intersect_range(extension, domain);
3989
0
  empty = isl_union_map_is_empty(extension);
3990
0
  isl_union_map_free(extension);
3991
0
3992
0
  return empty;
3993
0
}
3994
3995
/* Given a node "node" that is governed by an extension node, extend
3996
 * that extension node with "extension".
3997
 *
3998
 * In particular, "node" is the child of a filter in a sequence that
3999
 * is in turn a child of an extension node.  Extend that extension node
4000
 * with "extension".
4001
 *
4002
 * Return a pointer to the parent of the original node (i.e., a filter).
4003
 */
4004
static __isl_give isl_schedule_node *extend_extension(
4005
  __isl_take isl_schedule_node *node, __isl_take isl_union_map *extension)
4006
0
{
4007
0
  int pos;
4008
0
  int disjoint;
4009
0
  isl_union_map *node_extension;
4010
0
4011
0
  node = isl_schedule_node_parent(node);
4012
0
  pos = isl_schedule_node_get_child_position(node);
4013
0
  node = isl_schedule_node_parent(node);
4014
0
  node = isl_schedule_node_parent(node);
4015
0
  node_extension = isl_schedule_node_extension_get_extension(node);
4016
0
  disjoint = isl_union_map_is_disjoint(extension, node_extension);
4017
0
  extension = isl_union_map_union(extension, node_extension);
4018
0
  node = isl_schedule_node_extension_set_extension(node, extension);
4019
0
  node = isl_schedule_node_child(node, 0);
4020
0
  node = isl_schedule_node_child(node, pos);
4021
0
4022
0
  if (disjoint < 0)
4023
0
    return isl_schedule_node_free(node);
4024
0
  if (!node)
4025
0
    return NULL;
4026
0
  if (!disjoint)
4027
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
4028
0
      "extension domain should be disjoint from earlier "
4029
0
      "extensions", return isl_schedule_node_free(node));
4030
0
4031
0
  return node;
4032
0
}
4033
4034
/* Return the universe of "uset" if this universe is disjoint from "ref".
4035
 * Otherwise, return "uset".
4036
 *
4037
 * Also check if "uset" itself is disjoint from "ref", reporting
4038
 * an error if it is not.
4039
 */
4040
static __isl_give isl_union_set *replace_by_universe_if_disjoint(
4041
  __isl_take isl_union_set *uset, __isl_keep isl_union_set *ref)
4042
48
{
4043
48
  int disjoint;
4044
48
  isl_union_set *universe;
4045
48
4046
48
  disjoint = isl_union_set_is_disjoint(uset, ref);
4047
48
  if (disjoint < 0)
4048
0
    return isl_union_set_free(uset);
4049
48
  if (!disjoint)
4050
48
    
isl_die0
(isl_union_set_get_ctx(uset), isl_error_invalid,
4051
48
      "extension domain should be disjoint from "
4052
48
      "current domain", return isl_union_set_free(uset));
4053
48
4054
48
  universe = isl_union_set_universe(isl_union_set_copy(uset));
4055
48
  disjoint = isl_union_set_is_disjoint(universe, ref);
4056
48
  if (disjoint >= 0 && disjoint) {
4057
48
    isl_union_set_free(uset);
4058
48
    return universe;
4059
48
  }
4060
0
  isl_union_set_free(universe);
4061
0
4062
0
  if (disjoint < 0)
4063
0
    return isl_union_set_free(uset);
4064
0
  return uset;
4065
0
}
4066
4067
/* Insert an extension node on top of "node" with extension "extension".
4068
 * In addition, insert a filter that separates node from the extension
4069
 * between the extension node and "node".
4070
 * Return a pointer to the inserted filter node.
4071
 *
4072
 * If "node" already appears in an extension (i.e., if it is the child
4073
 * of a filter in a sequence inside an extension node), then extend that
4074
 * extension with "extension" instead.
4075
 * In this case, a pointer to the original filter node is returned.
4076
 * Note that if some of the elements in the new extension live in the
4077
 * same space as those of the original extension or the domain elements
4078
 * reaching the original extension, then we insert a new extension anyway.
4079
 * Otherwise, we would have to adjust the filters in the sequence child
4080
 * of the extension to ensure that the elements in the new extension
4081
 * are filtered out.
4082
 */
4083
static __isl_give isl_schedule_node *insert_extension(
4084
  __isl_take isl_schedule_node *node, __isl_take isl_union_map *extension)
4085
24
{
4086
24
  enum isl_schedule_node_type ancestors[] =
4087
24
    { isl_schedule_node_filter, isl_schedule_node_sequence,
4088
24
      isl_schedule_node_extension };
4089
24
  isl_union_set *domain;
4090
24
  isl_union_set *filter;
4091
24
  int in_ext;
4092
24
4093
24
  in_ext = has_ancestors(node, 3, ancestors);
4094
24
  if (in_ext < 0)
4095
0
    goto error;
4096
24
  if (in_ext) {
4097
0
    int disjoint;
4098
0
4099
0
    disjoint = is_disjoint_extension(node, extension);
4100
0
    if (disjoint < 0)
4101
0
      goto error;
4102
0
    if (disjoint)
4103
0
      return extend_extension(node, extension);
4104
24
  }
4105
24
4106
24
  filter = isl_schedule_node_get_domain(node);
4107
24
  domain = isl_union_map_range(isl_union_map_copy(extension));
4108
24
  filter = replace_by_universe_if_disjoint(filter, domain);
4109
24
  isl_union_set_free(domain);
4110
24
4111
24
  node = isl_schedule_node_insert_filter(node, filter);
4112
24
  node = isl_schedule_node_insert_extension(node, extension);
4113
24
  node = isl_schedule_node_child(node, 0);
4114
24
  return node;
4115
0
error:
4116
0
  isl_schedule_node_free(node);
4117
0
  isl_union_map_free(extension);
4118
0
  return NULL;
4119
24
}
4120
4121
/* Replace the subtree that "node" points to by "tree" (which has
4122
 * a sequence root with two children), except if the parent of "node"
4123
 * is a sequence as well, in which case "tree" is spliced at the position
4124
 * of "node" in its parent.
4125
 * Return a pointer to the child of the "tree_pos" (filter) child of "tree"
4126
 * in the updated schedule tree.
4127
 */
4128
static __isl_give isl_schedule_node *graft_or_splice(
4129
  __isl_take isl_schedule_node *node, __isl_take isl_schedule_tree *tree,
4130
  int tree_pos)
4131
24
{
4132
24
  int pos;
4133
24
4134
24
  if (isl_schedule_node_get_parent_type(node) ==
4135
24
      isl_schedule_node_sequence) {
4136
0
    pos = isl_schedule_node_get_child_position(node);
4137
0
    node = isl_schedule_node_parent(node);
4138
0
    node = isl_schedule_node_sequence_splice(node, pos, tree);
4139
24
  } else {
4140
24
    pos = 0;
4141
24
    node = isl_schedule_node_graft_tree(node, tree);
4142
24
  }
4143
24
  node = isl_schedule_node_child(node, pos + tree_pos);
4144
24
  node = isl_schedule_node_child(node, 0);
4145
24
4146
24
  return node;
4147
24
}
4148
4149
/* Insert a node "graft" into the schedule tree of "node" such that it
4150
 * is executed before (if "before" is set) or after (if "before" is not set)
4151
 * the node that "node" points to.
4152
 * The root of "graft" is an extension node.
4153
 * Return a pointer to the node that "node" pointed to.
4154
 *
4155
 * We first insert an extension node on top of "node" (or extend
4156
 * the extension node if there already is one), with a filter on "node"
4157
 * separating it from the extension.
4158
 * We then insert a filter in the graft to separate it from the original
4159
 * domain elements and combine the original and new tree in a sequence.
4160
 * If we have extended an extension node, then the children of this
4161
 * sequence are spliced in the sequence of the extended extension
4162
 * at the position where "node" appears in the original extension.
4163
 * Otherwise, the sequence pair is attached to the new extension node.
4164
 */
4165
static __isl_give isl_schedule_node *graft_extension(
4166
  __isl_take isl_schedule_node *node, __isl_take isl_schedule_node *graft,
4167
  int before)
4168
24
{
4169
24
  isl_union_map *extension;
4170
24
  isl_union_set *graft_domain;
4171
24
  isl_union_set *node_domain;
4172
24
  isl_schedule_tree *tree, *tree_graft;
4173
24
4174
24
  extension = isl_schedule_node_extension_get_extension(graft);
4175
24
  graft_domain = isl_union_map_range(isl_union_map_copy(extension));
4176
24
  node_domain = isl_schedule_node_get_universe_domain(node);
4177
24
  node = insert_extension(node, extension);
4178
24
4179
24
  graft_domain = replace_by_universe_if_disjoint(graft_domain,
4180
24
              node_domain);
4181
24
  isl_union_set_free(node_domain);
4182
24
4183
24
  tree = isl_schedule_node_get_tree(node);
4184
24
  if (!isl_schedule_node_has_children(graft)) {
4185
0
    tree_graft = isl_schedule_tree_from_filter(graft_domain);
4186
24
  } else {
4187
24
    graft = isl_schedule_node_child(graft, 0);
4188
24
    tree_graft = isl_schedule_node_get_tree(graft);
4189
24
    tree_graft = isl_schedule_tree_insert_filter(tree_graft,
4190
24
                graft_domain);
4191
24
  }
4192
24
  if (before)
4193
24
    tree = isl_schedule_tree_sequence_pair(tree_graft, tree);
4194
0
  else
4195
0
    tree = isl_schedule_tree_sequence_pair(tree, tree_graft);
4196
24
  node = graft_or_splice(node, tree, before);
4197
24
4198
24
  isl_schedule_node_free(graft);
4199
24
4200
24
  return node;
4201
24
}
4202
4203
/* Replace the root domain node of "node" by an extension node suitable
4204
 * for insertion at "pos".
4205
 * That is, create an extension node that maps the outer band nodes
4206
 * at "pos" to the domain of the root node of "node" and attach
4207
 * the child of this root node to the extension node.
4208
 */
4209
static __isl_give isl_schedule_node *extension_from_domain(
4210
  __isl_take isl_schedule_node *node, __isl_keep isl_schedule_node *pos)
4211
0
{
4212
0
  isl_union_set *universe;
4213
0
  isl_union_set *domain;
4214
0
  isl_union_map *ext;
4215
0
  int depth;
4216
0
  int anchored;
4217
0
  isl_space *space;
4218
0
  isl_schedule_node *res;
4219
0
  isl_schedule_tree *tree;
4220
0
4221
0
  anchored = isl_schedule_node_is_subtree_anchored(node);
4222
0
  if (anchored < 0)
4223
0
    return isl_schedule_node_free(node);
4224
0
  if (anchored)
4225
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_unsupported,
4226
0
      "cannot graft anchored tree with domain root",
4227
0
      return isl_schedule_node_free(node));
4228
0
4229
0
  depth = isl_schedule_node_get_schedule_depth(pos);
4230
0
  domain = isl_schedule_node_domain_get_domain(node);
4231
0
  space = isl_union_set_get_space(domain);
4232
0
  space = isl_space_set_from_params(space);
4233
0
  space = isl_space_add_dims(space, isl_dim_set, depth);
4234
0
  universe = isl_union_set_from_set(isl_set_universe(space));
4235
0
  ext = isl_union_map_from_domain_and_range(universe, domain);
4236
0
  res = isl_schedule_node_from_extension(ext);
4237
0
  node = isl_schedule_node_child(node, 0);
4238
0
  if (!node)
4239
0
    return isl_schedule_node_free(res);
4240
0
  if (!isl_schedule_tree_is_leaf(node->tree)) {
4241
0
    tree = isl_schedule_node_get_tree(node);
4242
0
    res = isl_schedule_node_child(res, 0);
4243
0
    res = isl_schedule_node_graft_tree(res, tree);
4244
0
    res = isl_schedule_node_parent(res);
4245
0
  }
4246
0
  isl_schedule_node_free(node);
4247
0
4248
0
  return res;
4249
0
}
4250
4251
/* Insert a node "graft" into the schedule tree of "node" such that it
4252
 * is executed before (if "before" is set) or after (if "before" is not set)
4253
 * the node that "node" points to.
4254
 * The root of "graft" may be either a domain or an extension node.
4255
 * In the latter case, the domain of the extension needs to correspond
4256
 * to the outer band nodes of "node".
4257
 * The elements of the domain or the range of the extension may not
4258
 * intersect with the domain elements that reach "node".
4259
 * The schedule tree of "graft" may not be anchored.
4260
 *
4261
 * The schedule tree of "node" is modified to include an extension node
4262
 * corresponding to the root node of "graft" as a child of the original
4263
 * parent of "node".  The original node that "node" points to and the
4264
 * child of the root node of "graft" are attached to this extension node
4265
 * through a sequence, with appropriate filters and with the child
4266
 * of "graft" appearing before or after the original "node".
4267
 *
4268
 * If "node" already appears inside a sequence that is the child of
4269
 * an extension node and if the spaces of the new domain elements
4270
 * do not overlap with those of the original domain elements,
4271
 * then that extension node is extended with the new extension
4272
 * rather than introducing a new segment of extension and sequence nodes.
4273
 *
4274
 * Return a pointer to the same node in the modified tree that
4275
 * "node" pointed to in the original tree.
4276
 */
4277
static __isl_give isl_schedule_node *isl_schedule_node_graft_before_or_after(
4278
  __isl_take isl_schedule_node *node, __isl_take isl_schedule_node *graft,
4279
  int before)
4280
24
{
4281
24
  if (!node || !graft)
4282
0
    goto error;
4283
24
  if (check_insert(node) < 0)
4284
0
    goto error;
4285
24
4286
24
  if (isl_schedule_node_get_type(graft) == isl_schedule_node_domain)
4287
0
    graft = extension_from_domain(graft, node);
4288
24
4289
24
  if (isl_schedule_node_get_type(graft) != isl_schedule_node_extension)
4290
24
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
4291
24
      "expecting domain or extension as root of graft",
4292
24
      goto error);
4293
24
4294
24
  return graft_extension(node, graft, before);
4295
0
error:
4296
0
  isl_schedule_node_free(node);
4297
0
  isl_schedule_node_free(graft);
4298
0
  return NULL;
4299
24
}
4300
4301
/* Insert a node "graft" into the schedule tree of "node" such that it
4302
 * is executed before the node that "node" points to.
4303
 * The root of "graft" may be either a domain or an extension node.
4304
 * In the latter case, the domain of the extension needs to correspond
4305
 * to the outer band nodes of "node".
4306
 * The elements of the domain or the range of the extension may not
4307
 * intersect with the domain elements that reach "node".
4308
 * The schedule tree of "graft" may not be anchored.
4309
 *
4310
 * Return a pointer to the same node in the modified tree that
4311
 * "node" pointed to in the original tree.
4312
 */
4313
__isl_give isl_schedule_node *isl_schedule_node_graft_before(
4314
  __isl_take isl_schedule_node *node, __isl_take isl_schedule_node *graft)
4315
24
{
4316
24
  return isl_schedule_node_graft_before_or_after(node, graft, 1);
4317
24
}
4318
4319
/* Insert a node "graft" into the schedule tree of "node" such that it
4320
 * is executed after the node that "node" points to.
4321
 * The root of "graft" may be either a domain or an extension node.
4322
 * In the latter case, the domain of the extension needs to correspond
4323
 * to the outer band nodes of "node".
4324
 * The elements of the domain or the range of the extension may not
4325
 * intersect with the domain elements that reach "node".
4326
 * The schedule tree of "graft" may not be anchored.
4327
 *
4328
 * Return a pointer to the same node in the modified tree that
4329
 * "node" pointed to in the original tree.
4330
 */
4331
__isl_give isl_schedule_node *isl_schedule_node_graft_after(
4332
  __isl_take isl_schedule_node *node,
4333
  __isl_take isl_schedule_node *graft)
4334
0
{
4335
0
  return isl_schedule_node_graft_before_or_after(node, graft, 0);
4336
0
}
4337
4338
/* Split the domain elements that reach "node" into those that satisfy
4339
 * "filter" and those that do not.  Arrange for the first subset to be
4340
 * executed before or after the second subset, depending on the value
4341
 * of "before".
4342
 * Return a pointer to the tree corresponding to the second subset,
4343
 * except when this subset is empty in which case the original pointer
4344
 * is returned.
4345
 * If both subsets are non-empty, then a sequence node is introduced
4346
 * to impose the order.  If the grandparent of the original node was
4347
 * itself a sequence, then the original child is replaced by two children
4348
 * in this sequence instead.
4349
 * The children in the sequence are copies of the original subtree,
4350
 * simplified with respect to their filters.
4351
 */
4352
static __isl_give isl_schedule_node *isl_schedule_node_order_before_or_after(
4353
  __isl_take isl_schedule_node *node, __isl_take isl_union_set *filter,
4354
  int before)
4355
0
{
4356
0
  enum isl_schedule_node_type ancestors[] =
4357
0
    { isl_schedule_node_filter, isl_schedule_node_sequence };
4358
0
  isl_union_set *node_domain, *node_filter = NULL, *parent_filter;
4359
0
  isl_schedule_node *node2;
4360
0
  isl_schedule_tree *tree1, *tree2;
4361
0
  int empty1, empty2;
4362
0
  int in_seq;
4363
0
4364
0
  if (!node || !filter)
4365
0
    goto error;
4366
0
  if (check_insert(node) < 0)
4367
0
    goto error;
4368
0
4369
0
  in_seq = has_ancestors(node, 2, ancestors);
4370
0
  if (in_seq < 0)
4371
0
    goto error;
4372
0
  node_domain = isl_schedule_node_get_domain(node);
4373
0
  filter = isl_union_set_gist(filter, isl_union_set_copy(node_domain));
4374
0
  node_filter = isl_union_set_copy(node_domain);
4375
0
  node_filter = isl_union_set_subtract(node_filter,
4376
0
            isl_union_set_copy(filter));
4377
0
  node_filter = isl_union_set_gist(node_filter, node_domain);
4378
0
  empty1 = isl_union_set_is_empty(filter);
4379
0
  empty2 = isl_union_set_is_empty(node_filter);
4380
0
  if (empty1 < 0 || empty2 < 0)
4381
0
    goto error;
4382
0
  if (empty1 || empty2) {
4383
0
    isl_union_set_free(filter);
4384
0
    isl_union_set_free(node_filter);
4385
0
    return node;
4386
0
  }
4387
0
4388
0
  if (in_seq) {
4389
0
    node = isl_schedule_node_parent(node);
4390
0
    parent_filter = isl_schedule_node_filter_get_filter(node);
4391
0
    node_filter = isl_union_set_intersect(node_filter,
4392
0
              isl_union_set_copy(parent_filter));
4393
0
    filter = isl_union_set_intersect(filter, parent_filter);
4394
0
  }
4395
0
4396
0
  node2 = isl_schedule_node_copy(node);
4397
0
  node = isl_schedule_node_gist(node, isl_union_set_copy(node_filter));
4398
0
  node2 = isl_schedule_node_gist(node2, isl_union_set_copy(filter));
4399
0
  tree1 = isl_schedule_node_get_tree(node);
4400
0
  tree2 = isl_schedule_node_get_tree(node2);
4401
0
  tree1 = isl_schedule_tree_insert_filter(tree1, node_filter);
4402
0
  tree2 = isl_schedule_tree_insert_filter(tree2, filter);
4403
0
  isl_schedule_node_free(node2);
4404
0
4405
0
  if (before) {
4406
0
    tree1 = isl_schedule_tree_sequence_pair(tree2, tree1);
4407
0
    node = graft_or_splice(node, tree1, 1);
4408
0
  } else {
4409
0
    tree1 = isl_schedule_tree_sequence_pair(tree1, tree2);
4410
0
    node = graft_or_splice(node, tree1, 0);
4411
0
  }
4412
0
4413
0
  return node;
4414
0
error:
4415
0
  isl_schedule_node_free(node);
4416
0
  isl_union_set_free(filter);
4417
0
  isl_union_set_free(node_filter);
4418
0
  return NULL;
4419
0
}
4420
4421
/* Split the domain elements that reach "node" into those that satisfy
4422
 * "filter" and those that do not.  Arrange for the first subset to be
4423
 * executed before the second subset.
4424
 * Return a pointer to the tree corresponding to the second subset,
4425
 * except when this subset is empty in which case the original pointer
4426
 * is returned.
4427
 */
4428
__isl_give isl_schedule_node *isl_schedule_node_order_before(
4429
  __isl_take isl_schedule_node *node, __isl_take isl_union_set *filter)
4430
0
{
4431
0
  return isl_schedule_node_order_before_or_after(node, filter, 1);
4432
0
}
4433
4434
/* Split the domain elements that reach "node" into those that satisfy
4435
 * "filter" and those that do not.  Arrange for the first subset to be
4436
 * executed after the second subset.
4437
 * Return a pointer to the tree corresponding to the second subset,
4438
 * except when this subset is empty in which case the original pointer
4439
 * is returned.
4440
 */
4441
__isl_give isl_schedule_node *isl_schedule_node_order_after(
4442
  __isl_take isl_schedule_node *node, __isl_take isl_union_set *filter)
4443
0
{
4444
0
  return isl_schedule_node_order_before_or_after(node, filter, 0);
4445
0
}
4446
4447
/* Reset the user pointer on all identifiers of parameters and tuples
4448
 * in the schedule node "node".
4449
 */
4450
__isl_give isl_schedule_node *isl_schedule_node_reset_user(
4451
  __isl_take isl_schedule_node *node)
4452
0
{
4453
0
  isl_schedule_tree *tree;
4454
0
4455
0
  tree = isl_schedule_node_get_tree(node);
4456
0
  tree = isl_schedule_tree_reset_user(tree);
4457
0
  node = isl_schedule_node_graft_tree(node, tree);
4458
0
4459
0
  return node;
4460
0
}
4461
4462
/* Align the parameters of the schedule node "node" to those of "space".
4463
 */
4464
__isl_give isl_schedule_node *isl_schedule_node_align_params(
4465
  __isl_take isl_schedule_node *node, __isl_take isl_space *space)
4466
0
{
4467
0
  isl_schedule_tree *tree;
4468
0
4469
0
  tree = isl_schedule_node_get_tree(node);
4470
0
  tree = isl_schedule_tree_align_params(tree, space);
4471
0
  node = isl_schedule_node_graft_tree(node, tree);
4472
0
4473
0
  return node;
4474
0
}
4475
4476
/* Compute the pullback of schedule node "node"
4477
 * by the function represented by "upma".
4478
 * In other words, plug in "upma" in the iteration domains
4479
 * of schedule node "node".
4480
 * We currently do not handle expansion nodes.
4481
 *
4482
 * Note that this is only a helper function for
4483
 * isl_schedule_pullback_union_pw_multi_aff.  In order to maintain consistency,
4484
 * this function should not be called on a single node without also
4485
 * calling it on all the other nodes.
4486
 */
4487
__isl_give isl_schedule_node *isl_schedule_node_pullback_union_pw_multi_aff(
4488
  __isl_take isl_schedule_node *node,
4489
  __isl_take isl_union_pw_multi_aff *upma)
4490
1.03k
{
4491
1.03k
  isl_schedule_tree *tree;
4492
1.03k
4493
1.03k
  tree = isl_schedule_node_get_tree(node);
4494
1.03k
  tree = isl_schedule_tree_pullback_union_pw_multi_aff(tree, upma);
4495
1.03k
  node = isl_schedule_node_graft_tree(node, tree);
4496
1.03k
4497
1.03k
  return node;
4498
1.03k
}
4499
4500
/* Internal data structure for isl_schedule_node_expand.
4501
 * "tree" is the tree that needs to be plugged in in all the leaves.
4502
 * "domain" is the set of domain elements in the original leaves
4503
 * to which the tree applies.
4504
 */
4505
struct isl_schedule_expand_data {
4506
  isl_schedule_tree *tree;
4507
  isl_union_set *domain;
4508
};
4509
4510
/* If "node" is a leaf, then plug in data->tree, simplifying it
4511
 * within its new context.
4512
 *
4513
 * If there are any domain elements at the leaf where the tree
4514
 * should not be plugged in (i.e., there are elements not in data->domain)
4515
 * then first extend the tree to only apply to the elements in data->domain
4516
 * by constructing a set node that selects data->tree for elements
4517
 * in data->domain and a leaf for the other elements.
4518
 */
4519
static __isl_give isl_schedule_node *expand(__isl_take isl_schedule_node *node,
4520
  void *user)
4521
0
{
4522
0
  struct isl_schedule_expand_data *data = user;
4523
0
  isl_schedule_tree *tree, *leaf;
4524
0
  isl_union_set *domain, *left;
4525
0
  isl_bool empty;
4526
0
4527
0
  if (isl_schedule_node_get_type(node) != isl_schedule_node_leaf)
4528
0
    return node;
4529
0
4530
0
  domain = isl_schedule_node_get_domain(node);
4531
0
  tree = isl_schedule_tree_copy(data->tree);
4532
0
4533
0
  left = isl_union_set_copy(domain);
4534
0
  left = isl_union_set_subtract(left, isl_union_set_copy(data->domain));
4535
0
  empty = isl_union_set_is_empty(left);
4536
0
  if (empty >= 0 && !empty) {
4537
0
    leaf = isl_schedule_node_get_leaf(node);
4538
0
    leaf = isl_schedule_tree_insert_filter(leaf, left);
4539
0
    left = isl_union_set_copy(data->domain);
4540
0
    tree = isl_schedule_tree_insert_filter(tree, left);
4541
0
    tree = isl_schedule_tree_set_pair(tree, leaf);
4542
0
  } else {
4543
0
    if (empty < 0)
4544
0
      node = isl_schedule_node_free(node);
4545
0
    isl_union_set_free(left);
4546
0
  }
4547
0
4548
0
  node = isl_schedule_node_graft_tree(node, tree);
4549
0
  node = isl_schedule_node_gist(node, domain);
4550
0
4551
0
  return node;
4552
0
}
4553
4554
/* Expand the tree rooted at "node" by extending all leaves
4555
 * with an expansion node with as child "tree".
4556
 * The expansion is determined by "contraction" and "domain".
4557
 * That is, the elements of "domain" are contracted according
4558
 * to "contraction".  The expansion relation is then the inverse
4559
 * of "contraction" with its range intersected with "domain".
4560
 *
4561
 * Insert the appropriate expansion node on top of "tree" and
4562
 * then plug in the result in all leaves of "node".
4563
 */
4564
__isl_give isl_schedule_node *isl_schedule_node_expand(
4565
  __isl_take isl_schedule_node *node,
4566
  __isl_take isl_union_pw_multi_aff *contraction,
4567
  __isl_take isl_union_set *domain,
4568
  __isl_take isl_schedule_tree *tree)
4569
0
{
4570
0
  struct isl_schedule_expand_data data;
4571
0
  isl_union_map *expansion;
4572
0
  isl_union_pw_multi_aff *copy;
4573
0
4574
0
  if (!node || !contraction || !tree)
4575
0
    node = isl_schedule_node_free(node);
4576
0
4577
0
  copy = isl_union_pw_multi_aff_copy(contraction);
4578
0
  expansion = isl_union_map_from_union_pw_multi_aff(copy);
4579
0
  expansion = isl_union_map_reverse(expansion);
4580
0
  expansion = isl_union_map_intersect_range(expansion, domain);
4581
0
  data.domain = isl_union_map_domain(isl_union_map_copy(expansion));
4582
0
4583
0
  tree = isl_schedule_tree_insert_expansion(tree, contraction, expansion);
4584
0
  data.tree = tree;
4585
0
4586
0
  node = isl_schedule_node_map_descendant_bottom_up(node, &expand, &data);
4587
0
  isl_union_set_free(data.domain);
4588
0
  isl_schedule_tree_free(data.tree);
4589
0
  return node;
4590
0
}
4591
4592
/* Return the position of the subtree containing "node" among the children
4593
 * of "ancestor".  "node" is assumed to be a descendant of "ancestor".
4594
 * In particular, both nodes should point to the same schedule tree.
4595
 *
4596
 * Return -1 on error.
4597
 */
4598
int isl_schedule_node_get_ancestor_child_position(
4599
  __isl_keep isl_schedule_node *node,
4600
  __isl_keep isl_schedule_node *ancestor)
4601
62.4k
{
4602
62.4k
  int n1, n2;
4603
62.4k
  isl_schedule_tree *tree;
4604
62.4k
4605
62.4k
  if (!node || !ancestor)
4606
0
    return -1;
4607
62.4k
4608
62.4k
  if (node->schedule != ancestor->schedule)
4609
62.4k
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
4610
62.4k
      "not a descendant", return -1);
4611
62.4k
4612
62.4k
  n1 = isl_schedule_node_get_tree_depth(ancestor);
4613
62.4k
  n2 = isl_schedule_node_get_tree_depth(node);
4614
62.4k
4615
62.4k
  if (n1 >= n2)
4616
62.4k
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
4617
62.4k
      "not a descendant", return -1);
4618
62.4k
  tree = isl_schedule_tree_list_get_schedule_tree(node->ancestors, n1);
4619
62.4k
  isl_schedule_tree_free(tree);
4620
62.4k
  if (tree != ancestor->tree)
4621
62.4k
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
4622
62.4k
      "not a descendant", return -1);
4623
62.4k
4624
62.4k
  return node->child_pos[n1];
4625
62.4k
}
4626
4627
/* Given two nodes that point to the same schedule tree, return their
4628
 * closest shared ancestor.
4629
 *
4630
 * Since the two nodes point to the same schedule, they share at least
4631
 * one ancestor, the root of the schedule.  We move down from the root
4632
 * to the first ancestor where the respective children have a different
4633
 * child position.  This is the requested ancestor.
4634
 * If there is no ancestor where the children have a different position,
4635
 * then one node is an ancestor of the other and then this node is
4636
 * the requested ancestor.
4637
 */
4638
__isl_give isl_schedule_node *isl_schedule_node_get_shared_ancestor(
4639
  __isl_keep isl_schedule_node *node1,
4640
  __isl_keep isl_schedule_node *node2)
4641
61.5k
{
4642
61.5k
  int i, n1, n2;
4643
61.5k
4644
61.5k
  if (!node1 || !node2)
4645
0
    return NULL;
4646
61.5k
  if (node1->schedule != node2->schedule)
4647
61.5k
    
isl_die0
(isl_schedule_node_get_ctx(node1), isl_error_invalid,
4648
61.5k
      "not part of same schedule", return NULL);
4649
61.5k
  n1 = isl_schedule_node_get_tree_depth(node1);
4650
61.5k
  n2 = isl_schedule_node_get_tree_depth(node2);
4651
61.5k
  if (n2 < n1)
4652
2.29k
    return isl_schedule_node_get_shared_ancestor(node2, node1);
4653
59.2k
  if (n1 == 0)
4654
0
    return isl_schedule_node_copy(node1);
4655
59.2k
  if (isl_schedule_node_is_equal(node1, node2))
4656
28.0k
    return isl_schedule_node_copy(node1);
4657
31.2k
4658
77.9k
  
for (i = 0; 31.2k
i < n1;
++i46.7k
)
4659
77.9k
    if (node1->child_pos[i] != node2->child_pos[i])
4660
31.2k
      break;
4661
61.5k
4662
61.5k
  node1 = isl_schedule_node_copy(node1);
4663
61.5k
  return isl_schedule_node_ancestor(node1, n1 - i);
4664
61.5k
}
4665
4666
/* Print "node" to "p".
4667
 */
4668
__isl_give isl_printer *isl_printer_print_schedule_node(
4669
  __isl_take isl_printer *p, __isl_keep isl_schedule_node *node)
4670
0
{
4671
0
  if (!node)
4672
0
    return isl_printer_free(p);
4673
0
  return isl_printer_print_schedule_tree_mark(p, node->schedule->root,
4674
0
      isl_schedule_tree_list_n_schedule_tree(node->ancestors),
4675
0
      node->child_pos);
4676
0
}
4677
4678
void isl_schedule_node_dump(__isl_keep isl_schedule_node *node)
4679
0
{
4680
0
  isl_ctx *ctx;
4681
0
  isl_printer *printer;
4682
0
4683
0
  if (!node)
4684
0
    return;
4685
0
4686
0
  ctx = isl_schedule_node_get_ctx(node);
4687
0
  printer = isl_printer_to_file(ctx, stderr);
4688
0
  printer = isl_printer_set_yaml_style(printer, ISL_YAML_STYLE_BLOCK);
4689
0
  printer = isl_printer_print_schedule_node(printer, node);
4690
0
4691
0
  isl_printer_free(printer);
4692
0
}
4693
4694
/* Return a string representation of "node".
4695
 * Print the schedule node in block format as it would otherwise
4696
 * look identical to the entire schedule.
4697
 */
4698
__isl_give char *isl_schedule_node_to_str(__isl_keep isl_schedule_node *node)
4699
0
{
4700
0
  isl_printer *printer;
4701
0
  char *s;
4702
0
4703
0
  if (!node)
4704
0
    return NULL;
4705
0
4706
0
  printer = isl_printer_to_str(isl_schedule_node_get_ctx(node));
4707
0
  printer = isl_printer_set_yaml_style(printer, ISL_YAML_STYLE_BLOCK);
4708
0
  printer = isl_printer_print_schedule_node(printer, node);
4709
0
  s = isl_printer_get_str(printer);
4710
0
  isl_printer_free(printer);
4711
0
4712
0
  return s;
4713
0
}