Coverage Report

Created: 2018-12-13 20:48

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/tools/polly/lib/External/isl/isl_schedule_node.c
Line
Count
Source (jump to first uncovered line)
1
/*
2
 * Copyright 2013-2014 Ecole Normale Superieure
3
 * Copyright 2014      INRIA Rocquencourt
4
 * Copyright 2016      Sven Verdoolaege
5
 *
6
 * Use of this software is governed by the MIT license
7
 *
8
 * Written by Sven Verdoolaege,
9
 * Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
10
 * and Inria Paris - Rocquencourt, Domaine de Voluceau - Rocquencourt,
11
 * B.P. 105 - 78153 Le Chesnay, France
12
 */
13
14
#include <isl/id.h>
15
#include <isl/val.h>
16
#include <isl/space.h>
17
#include <isl/set.h>
18
#include <isl_schedule_band.h>
19
#include <isl_schedule_private.h>
20
#include <isl_schedule_node_private.h>
21
22
/* Create a new schedule node in the given schedule, point at the given
23
 * tree with given ancestors and child positions.
24
 * "child_pos" may be NULL if there are no ancestors.
25
 */
26
__isl_give isl_schedule_node *isl_schedule_node_alloc(
27
  __isl_take isl_schedule *schedule, __isl_take isl_schedule_tree *tree,
28
  __isl_take isl_schedule_tree_list *ancestors, int *child_pos)
29
61.3k
{
30
61.3k
  isl_ctx *ctx;
31
61.3k
  isl_schedule_node *node;
32
61.3k
  int i, n;
33
61.3k
34
61.3k
  if (!schedule || !tree || !ancestors)
35
0
    goto error;
36
61.3k
  n = isl_schedule_tree_list_n_schedule_tree(ancestors);
37
61.3k
  if (n > 0 && 
!child_pos40.0k
)
38
0
    goto error;
39
61.3k
  ctx = isl_schedule_get_ctx(schedule);
40
61.3k
  node = isl_calloc_type(ctx, isl_schedule_node);
41
61.3k
  if (!node)
42
0
    goto error;
43
61.3k
  node->ref = 1;
44
61.3k
  node->schedule = schedule;
45
61.3k
  node->tree = tree;
46
61.3k
  node->ancestors = ancestors;
47
61.3k
  node->child_pos = isl_alloc_array(ctx, int, n);
48
61.3k
  if (n && 
!node->child_pos40.0k
)
49
0
    return isl_schedule_node_free(node);
50
253k
  
for (i = 0; 61.3k
i < n;
++i191k
)
51
191k
    node->child_pos[i] = child_pos[i];
52
61.3k
53
61.3k
  return node;
54
0
error:
55
0
  isl_schedule_free(schedule);
56
0
  isl_schedule_tree_free(tree);
57
0
  isl_schedule_tree_list_free(ancestors);
58
0
  return NULL;
59
61.3k
}
60
61
/* Return a pointer to the root of a schedule tree with as single
62
 * node a domain node with the given domain.
63
 */
64
__isl_give isl_schedule_node *isl_schedule_node_from_domain(
65
  __isl_take isl_union_set *domain)
66
125
{
67
125
  isl_schedule *schedule;
68
125
  isl_schedule_node *node;
69
125
70
125
  schedule = isl_schedule_from_domain(domain);
71
125
  node = isl_schedule_get_root(schedule);
72
125
  isl_schedule_free(schedule);
73
125
74
125
  return node;
75
125
}
76
77
/* Return a pointer to the root of a schedule tree with as single
78
 * node a extension node with the given extension.
79
 */
80
__isl_give isl_schedule_node *isl_schedule_node_from_extension(
81
  __isl_take isl_union_map *extension)
82
24
{
83
24
  isl_ctx *ctx;
84
24
  isl_schedule *schedule;
85
24
  isl_schedule_tree *tree;
86
24
  isl_schedule_node *node;
87
24
88
24
  if (!extension)
89
0
    return NULL;
90
24
91
24
  ctx = isl_union_map_get_ctx(extension);
92
24
  tree = isl_schedule_tree_from_extension(extension);
93
24
  schedule = isl_schedule_from_schedule_tree(ctx, tree);
94
24
  node = isl_schedule_get_root(schedule);
95
24
  isl_schedule_free(schedule);
96
24
97
24
  return node;
98
24
}
99
100
/* Return the isl_ctx to which "node" belongs.
101
 */
102
isl_ctx *isl_schedule_node_get_ctx(__isl_keep isl_schedule_node *node)
103
44.1k
{
104
44.1k
  return node ? isl_schedule_get_ctx(node->schedule) : NULL;
105
44.1k
}
106
107
/* Return a pointer to the leaf of the schedule into which "node" points.
108
 */
109
__isl_keep isl_schedule_tree *isl_schedule_node_peek_leaf(
110
  __isl_keep isl_schedule_node *node)
111
21.3k
{
112
21.3k
  return node ? isl_schedule_peek_leaf(node->schedule) : NULL;
113
21.3k
}
114
115
/* Return a copy of the leaf of the schedule into which "node" points.
116
 */
117
__isl_give isl_schedule_tree *isl_schedule_node_get_leaf(
118
  __isl_keep isl_schedule_node *node)
119
19.8k
{
120
19.8k
  return isl_schedule_tree_copy(isl_schedule_node_peek_leaf(node));
121
19.8k
}
122
123
/* Return the type of the node or isl_schedule_node_error on error.
124
 */
125
enum isl_schedule_node_type isl_schedule_node_get_type(
126
  __isl_keep isl_schedule_node *node)
127
118k
{
128
118k
  return node ? isl_schedule_tree_get_type(node->tree)
129
118k
        : 
isl_schedule_node_error0
;
130
118k
}
131
132
/* Return the type of the parent of "node" or isl_schedule_node_error on error.
133
 */
134
enum isl_schedule_node_type isl_schedule_node_get_parent_type(
135
  __isl_keep isl_schedule_node *node)
136
3.20k
{
137
3.20k
  int pos;
138
3.20k
  int has_parent;
139
3.20k
  isl_schedule_tree *parent;
140
3.20k
  enum isl_schedule_node_type type;
141
3.20k
142
3.20k
  if (!node)
143
0
    return isl_schedule_node_error;
144
3.20k
  has_parent = isl_schedule_node_has_parent(node);
145
3.20k
  if (has_parent < 0)
146
0
    return isl_schedule_node_error;
147
3.20k
  if (!has_parent)
148
3.20k
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
149
3.20k
      "node has no parent", return isl_schedule_node_error);
150
3.20k
151
3.20k
  pos = isl_schedule_tree_list_n_schedule_tree(node->ancestors) - 1;
152
3.20k
  parent = isl_schedule_tree_list_get_schedule_tree(node->ancestors, pos);
153
3.20k
  type = isl_schedule_tree_get_type(parent);
154
3.20k
  isl_schedule_tree_free(parent);
155
3.20k
156
3.20k
  return type;
157
3.20k
}
158
159
/* Return a copy of the subtree that this node points to.
160
 */
161
__isl_give isl_schedule_tree *isl_schedule_node_get_tree(
162
  __isl_keep isl_schedule_node *node)
163
6.17k
{
164
6.17k
  if (!node)
165
0
    return NULL;
166
6.17k
167
6.17k
  return isl_schedule_tree_copy(node->tree);
168
6.17k
}
169
170
/* Return a copy of the schedule into which "node" points.
171
 */
172
__isl_give isl_schedule *isl_schedule_node_get_schedule(
173
  __isl_keep isl_schedule_node *node)
174
6.26k
{
175
6.26k
  if (!node)
176
0
    return NULL;
177
6.26k
  return isl_schedule_copy(node->schedule);
178
6.26k
}
179
180
/* Return a fresh copy of "node".
181
 */
182
__isl_take isl_schedule_node *isl_schedule_node_dup(
183
  __isl_keep isl_schedule_node *node)
184
46.6k
{
185
46.6k
  if (!node)
186
0
    return NULL;
187
46.6k
188
46.6k
  return isl_schedule_node_alloc(isl_schedule_copy(node->schedule),
189
46.6k
        isl_schedule_tree_copy(node->tree),
190
46.6k
        isl_schedule_tree_list_copy(node->ancestors),
191
46.6k
        node->child_pos);
192
46.6k
}
193
194
/* Return an isl_schedule_node that is equal to "node" and that has only
195
 * a single reference.
196
 */
197
__isl_give isl_schedule_node *isl_schedule_node_cow(
198
  __isl_take isl_schedule_node *node)
199
132k
{
200
132k
  if (!node)
201
0
    return NULL;
202
132k
203
132k
  if (node->ref == 1)
204
85.4k
    return node;
205
46.6k
  node->ref--;
206
46.6k
  return isl_schedule_node_dup(node);
207
46.6k
}
208
209
/* Return a new reference to "node".
210
 */
211
__isl_give isl_schedule_node *isl_schedule_node_copy(
212
  __isl_keep isl_schedule_node *node)
213
117k
{
214
117k
  if (!node)
215
12.9k
    return NULL;
216
104k
217
104k
  node->ref++;
218
104k
  return node;
219
104k
}
220
221
/* Free "node" and return NULL.
222
 */
223
__isl_null isl_schedule_node *isl_schedule_node_free(
224
  __isl_take isl_schedule_node *node)
225
135k
{
226
135k
  if (!node)
227
15.7k
    return NULL;
228
119k
  if (--node->ref > 0)
229
58.1k
    return NULL;
230
61.3k
231
61.3k
  isl_schedule_tree_list_free(node->ancestors);
232
61.3k
  free(node->child_pos);
233
61.3k
  isl_schedule_tree_free(node->tree);
234
61.3k
  isl_schedule_free(node->schedule);
235
61.3k
  free(node);
236
61.3k
237
61.3k
  return NULL;
238
61.3k
}
239
240
/* Do "node1" and "node2" point to the same position in the same
241
 * schedule?
242
 */
243
isl_bool isl_schedule_node_is_equal(__isl_keep isl_schedule_node *node1,
244
  __isl_keep isl_schedule_node *node2)
245
60.5k
{
246
60.5k
  int i, n1, n2;
247
60.5k
248
60.5k
  if (!node1 || !node2)
249
0
    return isl_bool_error;
250
60.5k
  if (node1 == node2)
251
28.4k
    return isl_bool_true;
252
32.0k
  if (node1->schedule != node2->schedule)
253
0
    return isl_bool_false;
254
32.0k
255
32.0k
  n1 = isl_schedule_node_get_tree_depth(node1);
256
32.0k
  n2 = isl_schedule_node_get_tree_depth(node2);
257
32.0k
  if (n1 != n2)
258
6.73k
    return isl_bool_false;
259
59.5k
  
for (i = 0; 25.2k
i < n1;
++i34.2k
)
260
59.5k
    if (node1->child_pos[i] != node2->child_pos[i])
261
25.2k
      return isl_bool_false;
262
25.2k
263
25.2k
  
return isl_bool_true0
;
264
25.2k
}
265
266
/* Return the number of outer schedule dimensions of "node"
267
 * in its schedule tree.
268
 *
269
 * Return -1 on error.
270
 */
271
int isl_schedule_node_get_schedule_depth(__isl_keep isl_schedule_node *node)
272
62.9k
{
273
62.9k
  int i, n;
274
62.9k
  int depth = 0;
275
62.9k
276
62.9k
  if (!node)
277
0
    return -1;
278
62.9k
279
62.9k
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
280
243k
  for (i = n - 1; i >= 0; 
--i180k
) {
281
180k
    isl_schedule_tree *tree;
282
180k
283
180k
    tree = isl_schedule_tree_list_get_schedule_tree(
284
180k
                node->ancestors, i);
285
180k
    if (!tree)
286
0
      return -1;
287
180k
    if (tree->type == isl_schedule_node_band)
288
61.4k
      depth += isl_schedule_tree_band_n_member(tree);
289
180k
    isl_schedule_tree_free(tree);
290
180k
  }
291
62.9k
292
62.9k
  return depth;
293
62.9k
}
294
295
/* Internal data structure for
296
 * isl_schedule_node_get_prefix_schedule_union_pw_multi_aff
297
 *
298
 * "initialized" is set if the filter field has been initialized.
299
 * If "universe_domain" is not set, then the collected filter is intersected
300
 * with the domain of the root domain node.
301
 * "universe_filter" is set if we are only collecting the universes of filters
302
 * "collect_prefix" is set if we are collecting prefixes.
303
 * "filter" collects all outer filters and is NULL until "initialized" is set.
304
 * "prefix" collects all outer band partial schedules (if "collect_prefix"
305
 * is set).  If it is used, then it is initialized by the caller
306
 * of collect_filter_prefix to a zero-dimensional function.
307
 */
308
struct isl_schedule_node_get_filter_prefix_data {
309
  int initialized;
310
  int universe_domain;
311
  int universe_filter;
312
  int collect_prefix;
313
  isl_union_set *filter;
314
  isl_multi_union_pw_aff *prefix;
315
};
316
317
static int collect_filter_prefix(__isl_keep isl_schedule_tree_list *list,
318
  int n, struct isl_schedule_node_get_filter_prefix_data *data);
319
320
/* Update the filter and prefix information in "data" based on the first "n"
321
 * elements in "list" and the expansion tree root "tree".
322
 *
323
 * We first collect the information from the elements in "list",
324
 * initializing the filter based on the domain of the expansion.
325
 * Then we map the results to the expanded space and combined them
326
 * with the results already in "data".
327
 */
328
static int collect_filter_prefix_expansion(__isl_take isl_schedule_tree *tree,
329
  __isl_keep isl_schedule_tree_list *list, int n,
330
  struct isl_schedule_node_get_filter_prefix_data *data)
331
6
{
332
6
  struct isl_schedule_node_get_filter_prefix_data contracted;
333
6
  isl_union_pw_multi_aff *c;
334
6
  isl_union_map *exp, *universe;
335
6
  isl_union_set *filter;
336
6
337
6
  c = isl_schedule_tree_expansion_get_contraction(tree);
338
6
  exp = isl_schedule_tree_expansion_get_expansion(tree);
339
6
340
6
  contracted.initialized = 1;
341
6
  contracted.universe_domain = data->universe_domain;
342
6
  contracted.universe_filter = data->universe_filter;
343
6
  contracted.collect_prefix = data->collect_prefix;
344
6
  universe = isl_union_map_universe(isl_union_map_copy(exp));
345
6
  filter = isl_union_map_domain(universe);
346
6
  if (data->collect_prefix) {
347
3
    isl_space *space = isl_union_set_get_space(filter);
348
3
    space = isl_space_set_from_params(space);
349
3
    contracted.prefix = isl_multi_union_pw_aff_zero(space);
350
3
  }
351
6
  contracted.filter = filter;
352
6
353
6
  if (collect_filter_prefix(list, n, &contracted) < 0)
354
0
    contracted.filter = isl_union_set_free(contracted.filter);
355
6
  if (data->collect_prefix) {
356
3
    isl_multi_union_pw_aff *prefix;
357
3
358
3
    prefix = contracted.prefix;
359
3
    prefix =
360
3
        isl_multi_union_pw_aff_pullback_union_pw_multi_aff(prefix,
361
3
            isl_union_pw_multi_aff_copy(c));
362
3
    data->prefix = isl_multi_union_pw_aff_flat_range_product(
363
3
            prefix, data->prefix);
364
3
  }
365
6
  filter = contracted.filter;
366
6
  if (data->universe_domain)
367
3
    filter = isl_union_set_preimage_union_pw_multi_aff(filter,
368
3
            isl_union_pw_multi_aff_copy(c));
369
3
  else
370
3
    filter = isl_union_set_apply(filter, isl_union_map_copy(exp));
371
6
  if (!data->initialized)
372
1
    data->filter = filter;
373
5
  else
374
5
    data->filter = isl_union_set_intersect(filter, data->filter);
375
6
  data->initialized = 1;
376
6
377
6
  isl_union_pw_multi_aff_free(c);
378
6
  isl_union_map_free(exp);
379
6
  isl_schedule_tree_free(tree);
380
6
381
6
  return 0;
382
6
}
383
384
/* Update the filter information in "data" based on the first "n"
385
 * elements in "list" and the extension tree root "tree", in case
386
 * data->universe_domain is set and data->collect_prefix is not.
387
 *
388
 * We collect the universe domain of the elements in "list" and
389
 * add it to the universe range of the extension (intersected
390
 * with the already collected filter, if any).
391
 */
392
static int collect_universe_domain_extension(__isl_take isl_schedule_tree *tree,
393
  __isl_keep isl_schedule_tree_list *list, int n,
394
  struct isl_schedule_node_get_filter_prefix_data *data)
395
12
{
396
12
  struct isl_schedule_node_get_filter_prefix_data data_outer;
397
12
  isl_union_map *extension;
398
12
  isl_union_set *filter;
399
12
400
12
  data_outer.initialized = 0;
401
12
  data_outer.universe_domain = 1;
402
12
  data_outer.universe_filter = data->universe_filter;
403
12
  data_outer.collect_prefix = 0;
404
12
  data_outer.filter = NULL;
405
12
  data_outer.prefix = NULL;
406
12
407
12
  if (collect_filter_prefix(list, n, &data_outer) < 0)
408
0
    data_outer.filter = isl_union_set_free(data_outer.filter);
409
12
410
12
  extension = isl_schedule_tree_extension_get_extension(tree);
411
12
  extension = isl_union_map_universe(extension);
412
12
  filter = isl_union_map_range(extension);
413
12
  if (data_outer.initialized)
414
12
    filter = isl_union_set_union(filter, data_outer.filter);
415
12
  if (data->initialized)
416
12
    filter = isl_union_set_intersect(filter, data->filter);
417
12
418
12
  data->filter = filter;
419
12
420
12
  isl_schedule_tree_free(tree);
421
12
422
12
  return 0;
423
12
}
424
425
/* Update "data" based on the tree node "tree" in case "data" has
426
 * not been initialized yet.
427
 *
428
 * Return 0 on success and -1 on error.
429
 *
430
 * If "tree" is a filter, then we set data->filter to this filter
431
 * (or its universe).
432
 * If "tree" is a domain, then this means we have reached the root
433
 * of the schedule tree without being able to extract any information.
434
 * We therefore initialize data->filter to the universe of the domain,
435
 * or the domain itself if data->universe_domain is not set.
436
 * If "tree" is a band with at least one member, then we set data->filter
437
 * to the universe of the schedule domain and replace the zero-dimensional
438
 * data->prefix by the band schedule (if data->collect_prefix is set).
439
 */
440
static int collect_filter_prefix_init(__isl_keep isl_schedule_tree *tree,
441
  struct isl_schedule_node_get_filter_prefix_data *data)
442
8.12k
{
443
8.12k
  enum isl_schedule_node_type type;
444
8.12k
  isl_multi_union_pw_aff *mupa;
445
8.12k
  isl_union_set *filter;
446
8.12k
447
8.12k
  type = isl_schedule_tree_get_type(tree);
448
8.12k
  switch (type) {
449
8.12k
  case isl_schedule_node_error:
450
0
    return -1;
451
8.12k
  case isl_schedule_node_expansion:
452
0
    isl_die(isl_schedule_tree_get_ctx(tree), isl_error_internal,
453
0
      "should be handled by caller", return -1);
454
0
  case isl_schedule_node_extension:
455
0
    isl_die(isl_schedule_tree_get_ctx(tree), isl_error_invalid,
456
0
      "cannot handle extension nodes", return -1);
457
15
  case isl_schedule_node_context:
458
15
  case isl_schedule_node_leaf:
459
15
  case isl_schedule_node_guard:
460
15
  case isl_schedule_node_mark:
461
15
  case isl_schedule_node_sequence:
462
15
  case isl_schedule_node_set:
463
15
    return 0;
464
318
  case isl_schedule_node_domain:
465
318
    filter = isl_schedule_tree_domain_get_domain(tree);
466
318
    if (data->universe_domain)
467
176
      filter = isl_union_set_universe(filter);
468
318
    data->filter = filter;
469
318
    break;
470
4.64k
  case isl_schedule_node_band:
471
4.64k
    if (isl_schedule_tree_band_n_member(tree) == 0)
472
0
      return 0;
473
4.64k
    mupa = isl_schedule_tree_band_get_partial_schedule(tree);
474
4.64k
    if (data->collect_prefix) {
475
2.30k
      isl_multi_union_pw_aff_free(data->prefix);
476
2.30k
      mupa = isl_multi_union_pw_aff_reset_tuple_id(mupa,
477
2.30k
                isl_dim_set);
478
2.30k
      data->prefix = isl_multi_union_pw_aff_copy(mupa);
479
2.30k
    }
480
4.64k
    filter = isl_multi_union_pw_aff_domain(mupa);
481
4.64k
    filter = isl_union_set_universe(filter);
482
4.64k
    data->filter = filter;
483
4.64k
    break;
484
4.64k
  case isl_schedule_node_filter:
485
3.15k
    filter = isl_schedule_tree_filter_get_filter(tree);
486
3.15k
    if (data->universe_filter)
487
1.57k
      filter = isl_union_set_universe(filter);
488
3.15k
    data->filter = filter;
489
3.15k
    break;
490
8.11k
  }
491
8.11k
492
8.11k
  if ((data->collect_prefix && 
!data->prefix4.02k
) || !data->filter)
493
0
    return -1;
494
8.11k
495
8.11k
  data->initialized = 1;
496
8.11k
497
8.11k
  return 0;
498
8.11k
}
499
500
/* Update "data" based on the tree node "tree" in case "data" has
501
 * already been initialized.
502
 *
503
 * Return 0 on success and -1 on error.
504
 *
505
 * If "tree" is a domain and data->universe_domain is not set, then
506
 * intersect data->filter with the domain.
507
 * If "tree" is a filter, then we intersect data->filter with this filter
508
 * (or its universe).
509
 * If "tree" is a band with at least one member and data->collect_prefix
510
 * is set, then we extend data->prefix with the band schedule.
511
 * If "tree" is an extension, then we make sure that we are not collecting
512
 * information on any extended domain elements.
513
 */
514
static int collect_filter_prefix_update(__isl_keep isl_schedule_tree *tree,
515
  struct isl_schedule_node_get_filter_prefix_data *data)
516
20.5k
{
517
20.5k
  enum isl_schedule_node_type type;
518
20.5k
  isl_multi_union_pw_aff *mupa;
519
20.5k
  isl_union_set *filter;
520
20.5k
  isl_union_map *extension;
521
20.5k
  int empty;
522
20.5k
523
20.5k
  type = isl_schedule_tree_get_type(tree);
524
20.5k
  switch (type) {
525
20.5k
  case isl_schedule_node_error:
526
0
    return -1;
527
20.5k
  case isl_schedule_node_expansion:
528
0
    isl_die(isl_schedule_tree_get_ctx(tree), isl_error_internal,
529
0
      "should be handled by caller", return -1);
530
12
  case isl_schedule_node_extension:
531
12
    extension = isl_schedule_tree_extension_get_extension(tree);
532
12
    extension = isl_union_map_intersect_range(extension,
533
12
          isl_union_set_copy(data->filter));
534
12
    empty = isl_union_map_is_empty(extension);
535
12
    isl_union_map_free(extension);
536
12
    if (empty < 0)
537
0
      return -1;
538
12
    if (empty)
539
12
      break;
540
0
    isl_die(isl_schedule_tree_get_ctx(tree), isl_error_invalid,
541
0
      "cannot handle extension nodes", return -1);
542
5.64k
  case isl_schedule_node_context:
543
5.64k
  case isl_schedule_node_leaf:
544
5.64k
  case isl_schedule_node_guard:
545
5.64k
  case isl_schedule_node_mark:
546
5.64k
  case isl_schedule_node_sequence:
547
5.64k
  case isl_schedule_node_set:
548
5.64k
    break;
549
7.78k
  case isl_schedule_node_domain:
550
7.78k
    if (data->universe_domain)
551
3.89k
      break;
552
3.89k
    filter = isl_schedule_tree_domain_get_domain(tree);
553
3.89k
    data->filter = isl_union_set_intersect(data->filter, filter);
554
3.89k
    break;
555
4.90k
  case isl_schedule_node_band:
556
4.90k
    if (isl_schedule_tree_band_n_member(tree) == 0)
557
0
      break;
558
4.90k
    if (!data->collect_prefix)
559
2.41k
      break;
560
2.49k
    mupa = isl_schedule_tree_band_get_partial_schedule(tree);
561
2.49k
    data->prefix = isl_multi_union_pw_aff_flat_range_product(mupa,
562
2.49k
                data->prefix);
563
2.49k
    if (!data->prefix)
564
0
      return -1;
565
2.49k
    break;
566
2.49k
  case isl_schedule_node_filter:
567
2.21k
    filter = isl_schedule_tree_filter_get_filter(tree);
568
2.21k
    if (data->universe_filter)
569
1.10k
      filter = isl_union_set_universe(filter);
570
2.21k
    data->filter = isl_union_set_intersect(data->filter, filter);
571
2.21k
    if (!data->filter)
572
0
      return -1;
573
2.21k
    break;
574
20.5k
  }
575
20.5k
576
20.5k
  return 0;
577
20.5k
}
578
579
/* Collect filter and/or prefix information from the first "n"
580
 * elements in "list" (which represent the ancestors of a node).
581
 * Store the results in "data".
582
 *
583
 * Extension nodes are only supported if they do not affect the outcome,
584
 * i.e., if we are collecting information on non-extended domain elements,
585
 * or if we are collecting the universe domain (without prefix).
586
 *
587
 * Return 0 on success and -1 on error.
588
 *
589
 * We traverse the list from innermost ancestor (last element)
590
 * to outermost ancestor (first element), calling collect_filter_prefix_init
591
 * on each node as long as we have not been able to extract any information
592
 * yet and collect_filter_prefix_update afterwards.
593
 * If we come across an expansion node, then we interrupt the traversal
594
 * and call collect_filter_prefix_expansion to restart the traversal
595
 * over the remaining ancestors and to combine the results with those
596
 * that have already been collected.
597
 * If we come across an extension node and we are only computing
598
 * the universe domain, then we interrupt the traversal and call
599
 * collect_universe_domain_extension to restart the traversal
600
 * over the remaining ancestors and to combine the results with those
601
 * that have already been collected.
602
 * On successful return, data->initialized will be set since the outermost
603
 * ancestor is a domain node, which always results in an initialization.
604
 */
605
static int collect_filter_prefix(__isl_keep isl_schedule_tree_list *list,
606
  int n, struct isl_schedule_node_get_filter_prefix_data *data)
607
8.12k
{
608
8.12k
  int i;
609
8.12k
610
8.12k
  if (!list)
611
0
    return -1;
612
8.12k
613
36.8k
  
for (i = n - 1; 8.12k
i >= 0;
--i28.7k
) {
614
28.7k
    isl_schedule_tree *tree;
615
28.7k
    enum isl_schedule_node_type type;
616
28.7k
    int r;
617
28.7k
618
28.7k
    tree = isl_schedule_tree_list_get_schedule_tree(list, i);
619
28.7k
    if (!tree)
620
0
      return -1;
621
28.7k
    type = isl_schedule_tree_get_type(tree);
622
28.7k
    if (type == isl_schedule_node_expansion)
623
6
      return collect_filter_prefix_expansion(tree, list, i,
624
6
                data);
625
28.7k
    if (type == isl_schedule_node_extension &&
626
28.7k
        
data->universe_domain24
&&
!data->collect_prefix12
)
627
12
      return collect_universe_domain_extension(tree, list, i,
628
12
                data);
629
28.7k
    if (!data->initialized)
630
8.12k
      r = collect_filter_prefix_init(tree, data);
631
20.5k
    else
632
20.5k
      r = collect_filter_prefix_update(tree, data);
633
28.7k
    isl_schedule_tree_free(tree);
634
28.7k
    if (r < 0)
635
0
      return -1;
636
28.7k
  }
637
8.12k
638
8.12k
  
return 08.10k
;
639
8.12k
}
640
641
/* Return the concatenation of the partial schedules of all outer band
642
 * nodes of "node" interesected with all outer filters
643
 * as an isl_multi_union_pw_aff.
644
 * None of the ancestors of "node" may be an extension node, unless
645
 * there is also a filter ancestor that filters out all the extended
646
 * domain elements.
647
 *
648
 * If "node" is pointing at the root of the schedule tree, then
649
 * there are no domain elements reaching the current node, so
650
 * we return an empty result.
651
 *
652
 * We collect all the filters and partial schedules in collect_filter_prefix
653
 * and intersect the domain of the combined schedule with the combined filter.
654
 */
655
__isl_give isl_multi_union_pw_aff *
656
isl_schedule_node_get_prefix_schedule_multi_union_pw_aff(
657
  __isl_keep isl_schedule_node *node)
658
4
{
659
4
  int n;
660
4
  isl_space *space;
661
4
  struct isl_schedule_node_get_filter_prefix_data data;
662
4
663
4
  if (!node)
664
0
    return NULL;
665
4
666
4
  space = isl_schedule_get_space(node->schedule);
667
4
  space = isl_space_set_from_params(space);
668
4
  if (node->tree == node->schedule->root)
669
0
    return isl_multi_union_pw_aff_zero(space);
670
4
671
4
  data.initialized = 0;
672
4
  data.universe_domain = 1;
673
4
  data.universe_filter = 0;
674
4
  data.collect_prefix = 1;
675
4
  data.filter = NULL;
676
4
  data.prefix = isl_multi_union_pw_aff_zero(space);
677
4
678
4
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
679
4
  if (collect_filter_prefix(node->ancestors, n, &data) < 0)
680
0
    data.prefix = isl_multi_union_pw_aff_free(data.prefix);
681
4
682
4
  data.prefix = isl_multi_union_pw_aff_intersect_domain(data.prefix,
683
4
                data.filter);
684
4
685
4
  return data.prefix;
686
4
}
687
688
/* Return the concatenation of the partial schedules of all outer band
689
 * nodes of "node" interesected with all outer filters
690
 * as an isl_union_pw_multi_aff.
691
 * None of the ancestors of "node" may be an extension node, unless
692
 * there is also a filter ancestor that filters out all the extended
693
 * domain elements.
694
 *
695
 * If "node" is pointing at the root of the schedule tree, then
696
 * there are no domain elements reaching the current node, so
697
 * we return an empty result.
698
 *
699
 * We collect all the filters and partial schedules in collect_filter_prefix.
700
 * The partial schedules are collected as an isl_multi_union_pw_aff.
701
 * If this isl_multi_union_pw_aff is zero-dimensional, then it does not
702
 * contain any domain information, so we construct the isl_union_pw_multi_aff
703
 * result as a zero-dimensional function on the collected filter.
704
 * Otherwise, we convert the isl_multi_union_pw_aff to
705
 * an isl_multi_union_pw_aff and intersect the domain with the filter.
706
 */
707
__isl_give isl_union_pw_multi_aff *
708
isl_schedule_node_get_prefix_schedule_union_pw_multi_aff(
709
  __isl_keep isl_schedule_node *node)
710
16
{
711
16
  int n;
712
16
  isl_space *space;
713
16
  isl_union_pw_multi_aff *prefix;
714
16
  struct isl_schedule_node_get_filter_prefix_data data;
715
16
716
16
  if (!node)
717
0
    return NULL;
718
16
719
16
  space = isl_schedule_get_space(node->schedule);
720
16
  if (node->tree == node->schedule->root)
721
0
    return isl_union_pw_multi_aff_empty(space);
722
16
723
16
  space = isl_space_set_from_params(space);
724
16
  data.initialized = 0;
725
16
  data.universe_domain = 1;
726
16
  data.universe_filter = 0;
727
16
  data.collect_prefix = 1;
728
16
  data.filter = NULL;
729
16
  data.prefix = isl_multi_union_pw_aff_zero(space);
730
16
731
16
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
732
16
  if (collect_filter_prefix(node->ancestors, n, &data) < 0)
733
0
    data.prefix = isl_multi_union_pw_aff_free(data.prefix);
734
16
735
16
  if (data.prefix &&
736
16
      isl_multi_union_pw_aff_dim(data.prefix, isl_dim_set) == 0) {
737
0
    isl_multi_union_pw_aff_free(data.prefix);
738
0
    prefix = isl_union_pw_multi_aff_from_domain(data.filter);
739
16
  } else {
740
16
    prefix =
741
16
        isl_union_pw_multi_aff_from_multi_union_pw_aff(data.prefix);
742
16
    prefix = isl_union_pw_multi_aff_intersect_domain(prefix,
743
16
                data.filter);
744
16
  }
745
16
746
16
  return prefix;
747
16
}
748
749
/* Return the concatenation of the partial schedules of all outer band
750
 * nodes of "node" interesected with all outer filters
751
 * as an isl_union_map.
752
 */
753
__isl_give isl_union_map *isl_schedule_node_get_prefix_schedule_union_map(
754
  __isl_keep isl_schedule_node *node)
755
14
{
756
14
  isl_union_pw_multi_aff *upma;
757
14
758
14
  upma = isl_schedule_node_get_prefix_schedule_union_pw_multi_aff(node);
759
14
  return isl_union_map_from_union_pw_multi_aff(upma);
760
14
}
761
762
/* Return the concatenation of the partial schedules of all outer band
763
 * nodes of "node" intersected with all outer domain constraints.
764
 * None of the ancestors of "node" may be an extension node, unless
765
 * there is also a filter ancestor that filters out all the extended
766
 * domain elements.
767
 *
768
 * Essentially, this function intersects the domain of the output
769
 * of isl_schedule_node_get_prefix_schedule_union_map with the output
770
 * of isl_schedule_node_get_domain, except that it only traverses
771
 * the ancestors of "node" once.
772
 */
773
__isl_give isl_union_map *isl_schedule_node_get_prefix_schedule_relation(
774
  __isl_keep isl_schedule_node *node)
775
4.00k
{
776
4.00k
  int n;
777
4.00k
  isl_space *space;
778
4.00k
  isl_union_map *prefix;
779
4.00k
  struct isl_schedule_node_get_filter_prefix_data data;
780
4.00k
781
4.00k
  if (!node)
782
0
    return NULL;
783
4.00k
784
4.00k
  space = isl_schedule_get_space(node->schedule);
785
4.00k
  if (node->tree == node->schedule->root)
786
0
    return isl_union_map_empty(space);
787
4.00k
788
4.00k
  space = isl_space_set_from_params(space);
789
4.00k
  data.initialized = 0;
790
4.00k
  data.universe_domain = 0;
791
4.00k
  data.universe_filter = 0;
792
4.00k
  data.collect_prefix = 1;
793
4.00k
  data.filter = NULL;
794
4.00k
  data.prefix = isl_multi_union_pw_aff_zero(space);
795
4.00k
796
4.00k
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
797
4.00k
  if (collect_filter_prefix(node->ancestors, n, &data) < 0)
798
0
    data.prefix = isl_multi_union_pw_aff_free(data.prefix);
799
4.00k
800
4.00k
  if (data.prefix &&
801
4.00k
      isl_multi_union_pw_aff_dim(data.prefix, isl_dim_set) == 0) {
802
408
    isl_multi_union_pw_aff_free(data.prefix);
803
408
    prefix = isl_union_map_from_domain(data.filter);
804
3.59k
  } else {
805
3.59k
    prefix = isl_union_map_from_multi_union_pw_aff(data.prefix);
806
3.59k
    prefix = isl_union_map_intersect_domain(prefix, data.filter);
807
3.59k
  }
808
4.00k
809
4.00k
  return prefix;
810
4.00k
}
811
812
/* Return the domain elements that reach "node".
813
 *
814
 * If "node" is pointing at the root of the schedule tree, then
815
 * there are no domain elements reaching the current node, so
816
 * we return an empty result.
817
 * None of the ancestors of "node" may be an extension node, unless
818
 * there is also a filter ancestor that filters out all the extended
819
 * domain elements.
820
 *
821
 * Otherwise, we collect all filters reaching the node,
822
 * intersected with the root domain in collect_filter_prefix.
823
 */
824
__isl_give isl_union_set *isl_schedule_node_get_domain(
825
  __isl_keep isl_schedule_node *node)
826
32
{
827
32
  int n;
828
32
  struct isl_schedule_node_get_filter_prefix_data data;
829
32
830
32
  if (!node)
831
0
    return NULL;
832
32
833
32
  if (node->tree == node->schedule->root) {
834
0
    isl_space *space;
835
0
836
0
    space = isl_schedule_get_space(node->schedule);
837
0
    return isl_union_set_empty(space);
838
0
  }
839
32
840
32
  data.initialized = 0;
841
32
  data.universe_domain = 0;
842
32
  data.universe_filter = 0;
843
32
  data.collect_prefix = 0;
844
32
  data.filter = NULL;
845
32
  data.prefix = NULL;
846
32
847
32
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
848
32
  if (collect_filter_prefix(node->ancestors, n, &data) < 0)
849
0
    data.filter = isl_union_set_free(data.filter);
850
32
851
32
  return data.filter;
852
32
}
853
854
/* Return the union of universe sets of the domain elements that reach "node".
855
 *
856
 * If "node" is pointing at the root of the schedule tree, then
857
 * there are no domain elements reaching the current node, so
858
 * we return an empty result.
859
 *
860
 * Otherwise, we collect the universes of all filters reaching the node
861
 * in collect_filter_prefix.
862
 */
863
__isl_give isl_union_set *isl_schedule_node_get_universe_domain(
864
  __isl_keep isl_schedule_node *node)
865
4.04k
{
866
4.04k
  int n;
867
4.04k
  struct isl_schedule_node_get_filter_prefix_data data;
868
4.04k
869
4.04k
  if (!node)
870
0
    return NULL;
871
4.04k
872
4.04k
  if (node->tree == node->schedule->root) {
873
0
    isl_space *space;
874
0
875
0
    space = isl_schedule_get_space(node->schedule);
876
0
    return isl_union_set_empty(space);
877
0
  }
878
4.04k
879
4.04k
  data.initialized = 0;
880
4.04k
  data.universe_domain = 1;
881
4.04k
  data.universe_filter = 1;
882
4.04k
  data.collect_prefix = 0;
883
4.04k
  data.filter = NULL;
884
4.04k
  data.prefix = NULL;
885
4.04k
886
4.04k
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
887
4.04k
  if (collect_filter_prefix(node->ancestors, n, &data) < 0)
888
0
    data.filter = isl_union_set_free(data.filter);
889
4.04k
890
4.04k
  return data.filter;
891
4.04k
}
892
893
/* Return the subtree schedule of "node".
894
 *
895
 * Since isl_schedule_tree_get_subtree_schedule_union_map does not handle
896
 * trees that do not contain any schedule information, we first
897
 * move down to the first relevant descendant and handle leaves ourselves.
898
 *
899
 * If the subtree rooted at "node" contains any expansion nodes, then
900
 * the returned subtree schedule is formulated in terms of the expanded
901
 * domains.
902
 * The subtree is not allowed to contain any extension nodes.
903
 */
904
__isl_give isl_union_map *isl_schedule_node_get_subtree_schedule_union_map(
905
  __isl_keep isl_schedule_node *node)
906
1.50k
{
907
1.50k
  isl_schedule_tree *tree, *leaf;
908
1.50k
  isl_union_map *umap;
909
1.50k
910
1.50k
  tree = isl_schedule_node_get_tree(node);
911
1.50k
  leaf = isl_schedule_node_peek_leaf(node);
912
1.50k
  tree = isl_schedule_tree_first_schedule_descendant(tree, leaf);
913
1.50k
  if (!tree)
914
0
    return NULL;
915
1.50k
  if (tree == leaf) {
916
27
    isl_union_set *domain;
917
27
    domain = isl_schedule_node_get_universe_domain(node);
918
27
    isl_schedule_tree_free(tree);
919
27
    return isl_union_map_from_domain(domain);
920
27
  }
921
1.47k
922
1.47k
  umap = isl_schedule_tree_get_subtree_schedule_union_map(tree);
923
1.47k
  isl_schedule_tree_free(tree);
924
1.47k
  return umap;
925
1.47k
}
926
927
/* Return the number of ancestors of "node" in its schedule tree.
928
 */
929
int isl_schedule_node_get_tree_depth(__isl_keep isl_schedule_node *node)
930
472k
{
931
472k
  if (!node)
932
0
    return -1;
933
472k
  return isl_schedule_tree_list_n_schedule_tree(node->ancestors);
934
472k
}
935
936
/* Does "node" have a parent?
937
 *
938
 * That is, does it point to any node of the schedule other than the root?
939
 */
940
isl_bool isl_schedule_node_has_parent(__isl_keep isl_schedule_node *node)
941
91.4k
{
942
91.4k
  if (!node)
943
0
    return isl_bool_error;
944
91.4k
  if (!node->ancestors)
945
0
    return isl_bool_error;
946
91.4k
947
91.4k
  return isl_schedule_tree_list_n_schedule_tree(node->ancestors) != 0;
948
91.4k
}
949
950
/* Return the position of "node" among the children of its parent.
951
 */
952
int isl_schedule_node_get_child_position(__isl_keep isl_schedule_node *node)
953
0
{
954
0
  int n;
955
0
  int has_parent;
956
0
957
0
  if (!node)
958
0
    return -1;
959
0
  has_parent = isl_schedule_node_has_parent(node);
960
0
  if (has_parent < 0)
961
0
    return -1;
962
0
  if (!has_parent)
963
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
964
0
      "node has no parent", return -1);
965
0
966
0
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
967
0
  return node->child_pos[n - 1];
968
0
}
969
970
/* Does the parent (if any) of "node" have any children with a smaller child
971
 * position than this one?
972
 */
973
isl_bool isl_schedule_node_has_previous_sibling(
974
  __isl_keep isl_schedule_node *node)
975
0
{
976
0
  int n;
977
0
  isl_bool has_parent;
978
0
979
0
  if (!node)
980
0
    return isl_bool_error;
981
0
  has_parent = isl_schedule_node_has_parent(node);
982
0
  if (has_parent < 0 || !has_parent)
983
0
    return has_parent;
984
0
985
0
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
986
0
987
0
  return node->child_pos[n - 1] > 0;
988
0
}
989
990
/* Does the parent (if any) of "node" have any children with a greater child
991
 * position than this one?
992
 */
993
isl_bool isl_schedule_node_has_next_sibling(__isl_keep isl_schedule_node *node)
994
50.5k
{
995
50.5k
  int n, n_child;
996
50.5k
  isl_bool has_parent;
997
50.5k
  isl_schedule_tree *tree;
998
50.5k
999
50.5k
  if (!node)
1000
0
    return isl_bool_error;
1001
50.5k
  has_parent = isl_schedule_node_has_parent(node);
1002
50.5k
  if (has_parent < 0 || !has_parent)
1003
0
    return has_parent;
1004
50.5k
1005
50.5k
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
1006
50.5k
  tree = isl_schedule_tree_list_get_schedule_tree(node->ancestors, n - 1);
1007
50.5k
  if (!tree)
1008
0
    return isl_bool_error;
1009
50.5k
  n_child = isl_schedule_tree_list_n_schedule_tree(tree->children);
1010
50.5k
  isl_schedule_tree_free(tree);
1011
50.5k
1012
50.5k
  return node->child_pos[n - 1] + 1 < n_child;
1013
50.5k
}
1014
1015
/* Does "node" have any children?
1016
 *
1017
 * Any node other than the leaf nodes is considered to have at least
1018
 * one child, even if the corresponding isl_schedule_tree does not
1019
 * have any children.
1020
 */
1021
isl_bool isl_schedule_node_has_children(__isl_keep isl_schedule_node *node)
1022
82.7k
{
1023
82.7k
  if (!node)
1024
0
    return isl_bool_error;
1025
82.7k
  return !isl_schedule_tree_is_leaf(node->tree);
1026
82.7k
}
1027
1028
/* Return the number of children of "node"?
1029
 *
1030
 * Any node other than the leaf nodes is considered to have at least
1031
 * one child, even if the corresponding isl_schedule_tree does not
1032
 * have any children.  That is, the number of children of "node" is
1033
 * only zero if its tree is the explicit empty tree.  Otherwise,
1034
 * if the isl_schedule_tree has any children, then it is equal
1035
 * to the number of children of "node".  If it has zero children,
1036
 * then "node" still has a leaf node as child.
1037
 */
1038
int isl_schedule_node_n_children(__isl_keep isl_schedule_node *node)
1039
832
{
1040
832
  int n;
1041
832
1042
832
  if (!node)
1043
0
    return -1;
1044
832
1045
832
  if (isl_schedule_tree_is_leaf(node->tree))
1046
0
    return 0;
1047
832
1048
832
  n = isl_schedule_tree_n_children(node->tree);
1049
832
  if (n == 0)
1050
218
    return 1;
1051
614
1052
614
  return n;
1053
614
}
1054
1055
/* Move the "node" pointer to the ancestor of the given generation
1056
 * of the node it currently points to, where generation 0 is the node
1057
 * itself and generation 1 is its parent.
1058
 */
1059
__isl_give isl_schedule_node *isl_schedule_node_ancestor(
1060
  __isl_take isl_schedule_node *node, int generation)
1061
67.0k
{
1062
67.0k
  int n;
1063
67.0k
  isl_schedule_tree *tree;
1064
67.0k
1065
67.0k
  if (!node)
1066
0
    return NULL;
1067
67.0k
  if (generation == 0)
1068
0
    return node;
1069
67.0k
  n = isl_schedule_node_get_tree_depth(node);
1070
67.0k
  if (n < 0)
1071
0
    return isl_schedule_node_free(node);
1072
67.0k
  if (generation < 0 || generation > n)
1073
67.0k
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
1074
67.0k
      "generation out of bounds",
1075
67.0k
      return isl_schedule_node_free(node));
1076
67.0k
  node = isl_schedule_node_cow(node);
1077
67.0k
  if (!node)
1078
0
    return NULL;
1079
67.0k
1080
67.0k
  tree = isl_schedule_tree_list_get_schedule_tree(node->ancestors,
1081
67.0k
              n - generation);
1082
67.0k
  isl_schedule_tree_free(node->tree);
1083
67.0k
  node->tree = tree;
1084
67.0k
  node->ancestors = isl_schedule_tree_list_drop(node->ancestors,
1085
67.0k
                n - generation, generation);
1086
67.0k
  if (!node->ancestors || !node->tree)
1087
0
    return isl_schedule_node_free(node);
1088
67.0k
1089
67.0k
  return node;
1090
67.0k
}
1091
1092
/* Move the "node" pointer to the parent of the node it currently points to.
1093
 */
1094
__isl_give isl_schedule_node *isl_schedule_node_parent(
1095
  __isl_take isl_schedule_node *node)
1096
34.9k
{
1097
34.9k
  if (!node)
1098
0
    return NULL;
1099
34.9k
  if (!isl_schedule_node_has_parent(node))
1100
34.9k
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
1101
34.9k
      "node has no parent",
1102
34.9k
      return isl_schedule_node_free(node));
1103
34.9k
  return isl_schedule_node_ancestor(node, 1);
1104
34.9k
}
1105
1106
/* Move the "node" pointer to the root of its schedule tree.
1107
 */
1108
__isl_give isl_schedule_node *isl_schedule_node_root(
1109
  __isl_take isl_schedule_node *node)
1110
1
{
1111
1
  int n;
1112
1
1113
1
  if (!node)
1114
0
    return NULL;
1115
1
  n = isl_schedule_node_get_tree_depth(node);
1116
1
  if (n < 0)
1117
0
    return isl_schedule_node_free(node);
1118
1
  return isl_schedule_node_ancestor(node, n);
1119
1
}
1120
1121
/* Move the "node" pointer to the child at position "pos" of the node
1122
 * it currently points to.
1123
 */
1124
__isl_give isl_schedule_node *isl_schedule_node_child(
1125
  __isl_take isl_schedule_node *node, int pos)
1126
42.3k
{
1127
42.3k
  int n;
1128
42.3k
  isl_ctx *ctx;
1129
42.3k
  isl_schedule_tree *tree;
1130
42.3k
  int *child_pos;
1131
42.3k
1132
42.3k
  node = isl_schedule_node_cow(node);
1133
42.3k
  if (!node)
1134
0
    return NULL;
1135
42.3k
  if (!isl_schedule_node_has_children(node))
1136
42.3k
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
1137
42.3k
      "node has no children",
1138
42.3k
      return isl_schedule_node_free(node));
1139
42.3k
1140
42.3k
  ctx = isl_schedule_node_get_ctx(node);
1141
42.3k
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
1142
42.3k
  child_pos = isl_realloc_array(ctx, node->child_pos, int, n + 1);
1143
42.3k
  if (!child_pos)
1144
0
    return isl_schedule_node_free(node);
1145
42.3k
  node->child_pos = child_pos;
1146
42.3k
  node->child_pos[n] = pos;
1147
42.3k
1148
42.3k
  node->ancestors = isl_schedule_tree_list_add(node->ancestors,
1149
42.3k
        isl_schedule_tree_copy(node->tree));
1150
42.3k
  tree = node->tree;
1151
42.3k
  if (isl_schedule_tree_has_children(tree))
1152
23.2k
    tree = isl_schedule_tree_get_child(tree, pos);
1153
19.0k
  else
1154
19.0k
    tree = isl_schedule_node_get_leaf(node);
1155
42.3k
  isl_schedule_tree_free(node->tree);
1156
42.3k
  node->tree = tree;
1157
42.3k
1158
42.3k
  if (!node->tree || !node->ancestors)
1159
0
    return isl_schedule_node_free(node);
1160
42.3k
1161
42.3k
  return node;
1162
42.3k
}
1163
1164
/* Move the "node" pointer to the first child of the node
1165
 * it currently points to.
1166
 */
1167
__isl_give isl_schedule_node *isl_schedule_node_first_child(
1168
  __isl_take isl_schedule_node *node)
1169
33.3k
{
1170
33.3k
  return isl_schedule_node_child(node, 0);
1171
33.3k
}
1172
1173
/* Move the "node" pointer to the child of this node's parent in
1174
 * the previous child position.
1175
 */
1176
__isl_give isl_schedule_node *isl_schedule_node_previous_sibling(
1177
  __isl_take isl_schedule_node *node)
1178
0
{
1179
0
  int n;
1180
0
  isl_schedule_tree *parent, *tree;
1181
0
1182
0
  node = isl_schedule_node_cow(node);
1183
0
  if (!node)
1184
0
    return NULL;
1185
0
  if (!isl_schedule_node_has_previous_sibling(node))
1186
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
1187
0
      "node has no previous sibling",
1188
0
      return isl_schedule_node_free(node));
1189
0
1190
0
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
1191
0
  parent = isl_schedule_tree_list_get_schedule_tree(node->ancestors,
1192
0
                  n - 1);
1193
0
  if (!parent)
1194
0
    return isl_schedule_node_free(node);
1195
0
  node->child_pos[n - 1]--;
1196
0
  tree = isl_schedule_tree_list_get_schedule_tree(parent->children,
1197
0
              node->child_pos[n - 1]);
1198
0
  isl_schedule_tree_free(parent);
1199
0
  if (!tree)
1200
0
    return isl_schedule_node_free(node);
1201
0
  isl_schedule_tree_free(node->tree);
1202
0
  node->tree = tree;
1203
0
1204
0
  return node;
1205
0
}
1206
1207
/* Move the "node" pointer to the child of this node's parent in
1208
 * the next child position.
1209
 */
1210
__isl_give isl_schedule_node *isl_schedule_node_next_sibling(
1211
  __isl_take isl_schedule_node *node)
1212
8.48k
{
1213
8.48k
  int n;
1214
8.48k
  isl_schedule_tree *parent, *tree;
1215
8.48k
1216
8.48k
  node = isl_schedule_node_cow(node);
1217
8.48k
  if (!node)
1218
0
    return NULL;
1219
8.48k
  if (!isl_schedule_node_has_next_sibling(node))
1220
8.48k
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
1221
8.48k
      "node has no next sibling",
1222
8.48k
      return isl_schedule_node_free(node));
1223
8.48k
1224
8.48k
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
1225
8.48k
  parent = isl_schedule_tree_list_get_schedule_tree(node->ancestors,
1226
8.48k
                  n - 1);
1227
8.48k
  if (!parent)
1228
0
    return isl_schedule_node_free(node);
1229
8.48k
  node->child_pos[n - 1]++;
1230
8.48k
  tree = isl_schedule_tree_list_get_schedule_tree(parent->children,
1231
8.48k
              node->child_pos[n - 1]);
1232
8.48k
  isl_schedule_tree_free(parent);
1233
8.48k
  if (!tree)
1234
0
    return isl_schedule_node_free(node);
1235
8.48k
  isl_schedule_tree_free(node->tree);
1236
8.48k
  node->tree = tree;
1237
8.48k
1238
8.48k
  return node;
1239
8.48k
}
1240
1241
/* Return a copy to the child at position "pos" of "node".
1242
 */
1243
__isl_give isl_schedule_node *isl_schedule_node_get_child(
1244
  __isl_keep isl_schedule_node *node, int pos)
1245
602
{
1246
602
  return isl_schedule_node_child(isl_schedule_node_copy(node), pos);
1247
602
}
1248
1249
/* Traverse the descendant of "node" in depth-first order, including
1250
 * "node" itself.  Call "enter" whenever a node is entered and "leave"
1251
 * whenever a node is left.  The callback "enter" is responsible
1252
 * for moving to the deepest initial subtree of its argument that
1253
 * should be traversed.
1254
 */
1255
static __isl_give isl_schedule_node *traverse(
1256
  __isl_take isl_schedule_node *node,
1257
  __isl_give isl_schedule_node *(*enter)(
1258
    __isl_take isl_schedule_node *node, void *user),
1259
  __isl_give isl_schedule_node *(*leave)(
1260
    __isl_take isl_schedule_node *node, void *user),
1261
  void *user)
1262
7.08k
{
1263
7.08k
  int depth;
1264
7.08k
1265
7.08k
  if (!node)
1266
0
    return NULL;
1267
7.08k
1268
7.08k
  depth = isl_schedule_node_get_tree_depth(node);
1269
15.5k
  do {
1270
15.5k
    node = enter(node, user);
1271
15.5k
    node = leave(node, user);
1272
49.1k
    while (node && 
isl_schedule_node_get_tree_depth(node) > depth49.1k
&&
1273
49.1k
        
!isl_schedule_node_has_next_sibling(node)42.0k
) {
1274
33.6k
      node = isl_schedule_node_parent(node);
1275
33.6k
      node = leave(node, user);
1276
33.6k
    }
1277
15.5k
    if (node && 
isl_schedule_node_get_tree_depth(node) > depth15.5k
)
1278
8.48k
      node = isl_schedule_node_next_sibling(node);
1279
15.5k
  } while (node && 
isl_schedule_node_get_tree_depth(node) > depth15.5k
);
1280
7.08k
1281
7.08k
  return node;
1282
7.08k
}
1283
1284
/* Internal data structure for isl_schedule_node_foreach_descendant_top_down.
1285
 *
1286
 * "fn" is the user-specified callback function.
1287
 * "user" is the user-specified argument for the callback.
1288
 */
1289
struct isl_schedule_node_preorder_data {
1290
  isl_bool (*fn)(__isl_keep isl_schedule_node *node, void *user);
1291
  void *user;
1292
};
1293
1294
/* Callback for "traverse" to enter a node and to move
1295
 * to the deepest initial subtree that should be traversed
1296
 * for use in a preorder visit.
1297
 *
1298
 * If the user callback returns a negative value, then we abort
1299
 * the traversal.  If this callback returns zero, then we skip
1300
 * the subtree rooted at the current node.  Otherwise, we move
1301
 * down to the first child and repeat the process until a leaf
1302
 * is reached.
1303
 */
1304
static __isl_give isl_schedule_node *preorder_enter(
1305
  __isl_take isl_schedule_node *node, void *user)
1306
13.8k
{
1307
13.8k
  struct isl_schedule_node_preorder_data *data = user;
1308
13.8k
1309
13.8k
  if (!node)
1310
0
    return NULL;
1311
13.8k
1312
44.9k
  
do 13.8k
{
1313
44.9k
    isl_bool r;
1314
44.9k
1315
44.9k
    r = data->fn(node, data->user);
1316
44.9k
    if (r < 0)
1317
16
      return isl_schedule_node_free(node);
1318
44.8k
    if (r == isl_bool_false)
1319
7.94k
      return node;
1320
36.9k
  } while (isl_schedule_node_has_children(node) &&
1321
36.9k
    
(node = isl_schedule_node_first_child(node)) != NULL31.0k
);
1322
13.8k
1323
13.8k
  
return node5.88k
;
1324
13.8k
}
1325
1326
/* Callback for "traverse" to leave a node
1327
 * for use in a preorder visit.
1328
 * Since we already visited the node when we entered it,
1329
 * we do not need to do anything here.
1330
 */
1331
static __isl_give isl_schedule_node *preorder_leave(
1332
  __isl_take isl_schedule_node *node, void *user)
1333
44.8k
{
1334
44.8k
  return node;
1335
44.8k
}
1336
1337
/* Traverse the descendants of "node" (including the node itself)
1338
 * in depth first preorder.
1339
 *
1340
 * If "fn" returns isl_bool_error on any of the nodes,
1341
 * then the traversal is aborted.
1342
 * If "fn" returns isl_bool_false on any of the nodes, then the subtree rooted
1343
 * at that node is skipped.
1344
 *
1345
 * Return isl_stat_ok on success and isl_stat_error on failure.
1346
 */
1347
isl_stat isl_schedule_node_foreach_descendant_top_down(
1348
  __isl_keep isl_schedule_node *node,
1349
  isl_bool (*fn)(__isl_keep isl_schedule_node *node, void *user),
1350
  void *user)
1351
6.47k
{
1352
6.47k
  struct isl_schedule_node_preorder_data data = { fn, user };
1353
6.47k
1354
6.47k
  node = isl_schedule_node_copy(node);
1355
6.47k
  node = traverse(node, &preorder_enter, &preorder_leave, &data);
1356
6.47k
  isl_schedule_node_free(node);
1357
6.47k
1358
6.47k
  return node ? 
isl_stat_ok6.45k
:
isl_stat_error16
;
1359
6.47k
}
1360
1361
/* Internal data structure for isl_schedule_node_every_descendant.
1362
 *
1363
 * "test" is the user-specified callback function.
1364
 * "user" is the user-specified callback function argument.
1365
 *
1366
 * "failed" is initialized to 0 and set to 1 if "test" fails
1367
 * on any node.
1368
 */
1369
struct isl_union_map_every_data {
1370
  isl_bool (*test)(__isl_keep isl_schedule_node *node, void *user);
1371
  void *user;
1372
  int failed;
1373
};
1374
1375
/* isl_schedule_node_foreach_descendant_top_down callback
1376
 * that sets data->failed if data->test returns false and
1377
 * subsequently aborts the traversal.
1378
 */
1379
static isl_bool call_every(__isl_keep isl_schedule_node *node, void *user)
1380
0
{
1381
0
  struct isl_union_map_every_data *data = user;
1382
0
  isl_bool r;
1383
0
1384
0
  r = data->test(node, data->user);
1385
0
  if (r < 0)
1386
0
    return isl_bool_error;
1387
0
  if (r)
1388
0
    return isl_bool_true;
1389
0
  data->failed = 1;
1390
0
  return isl_bool_error;
1391
0
}
1392
1393
/* Does "test" succeed on every descendant of "node" (including "node" itself)?
1394
 */
1395
isl_bool isl_schedule_node_every_descendant(__isl_keep isl_schedule_node *node,
1396
  isl_bool (*test)(__isl_keep isl_schedule_node *node, void *user),
1397
  void *user)
1398
0
{
1399
0
  struct isl_union_map_every_data data = { test, user, 0 };
1400
0
  isl_stat r;
1401
0
1402
0
  r = isl_schedule_node_foreach_descendant_top_down(node, &call_every,
1403
0
              &data);
1404
0
  if (r >= 0)
1405
0
    return isl_bool_true;
1406
0
  if (data.failed)
1407
0
    return isl_bool_false;
1408
0
  return isl_bool_error;
1409
0
}
1410
1411
/* Internal data structure for isl_schedule_node_map_descendant_bottom_up.
1412
 *
1413
 * "fn" is the user-specified callback function.
1414
 * "user" is the user-specified argument for the callback.
1415
 */
1416
struct isl_schedule_node_postorder_data {
1417
  __isl_give isl_schedule_node *(*fn)(__isl_take isl_schedule_node *node,
1418
    void *user);
1419
  void *user;
1420
};
1421
1422
/* Callback for "traverse" to enter a node and to move
1423
 * to the deepest initial subtree that should be traversed
1424
 * for use in a postorder visit.
1425
 *
1426
 * Since we are performing a postorder visit, we only need
1427
 * to move to the deepest initial leaf here.
1428
 */
1429
static __isl_give isl_schedule_node *postorder_enter(
1430
  __isl_take isl_schedule_node *node, void *user)
1431
356
{
1432
1.29k
  while (node && isl_schedule_node_has_children(node))
1433
935
    node = isl_schedule_node_first_child(node);
1434
356
1435
356
  return node;
1436
356
}
1437
1438
/* Callback for "traverse" to leave a node
1439
 * for use in a postorder visit.
1440
 *
1441
 * Since we are performing a postorder visit, we need
1442
 * to call the user callback here.
1443
 */
1444
static __isl_give isl_schedule_node *postorder_leave(
1445
  __isl_take isl_schedule_node *node, void *user)
1446
1.61k
{
1447
1.61k
  struct isl_schedule_node_postorder_data *data = user;
1448
1.61k
1449
1.61k
  return data->fn(node, data->user);
1450
1.61k
}
1451
1452
/* Traverse the descendants of "node" (including the node itself)
1453
 * in depth first postorder, allowing the user to modify the visited node.
1454
 * The traversal continues from the node returned by the callback function.
1455
 * It is the responsibility of the user to ensure that this does not
1456
 * lead to an infinite loop.  It is safest to always return a pointer
1457
 * to the same position (same ancestors and child positions) as the input node.
1458
 */
1459
__isl_give isl_schedule_node *isl_schedule_node_map_descendant_bottom_up(
1460
  __isl_take isl_schedule_node *node,
1461
  __isl_give isl_schedule_node *(*fn)(__isl_take isl_schedule_node *node,
1462
    void *user), void *user)
1463
187
{
1464
187
  struct isl_schedule_node_postorder_data data = { fn, user };
1465
187
1466
187
  return traverse(node, &postorder_enter, &postorder_leave, &data);
1467
187
}
1468
1469
/* Traverse the ancestors of "node" from the root down to and including
1470
 * the parent of "node", calling "fn" on each of them.
1471
 *
1472
 * If "fn" returns -1 on any of the nodes, then the traversal is aborted.
1473
 *
1474
 * Return 0 on success and -1 on failure.
1475
 */
1476
isl_stat isl_schedule_node_foreach_ancestor_top_down(
1477
  __isl_keep isl_schedule_node *node,
1478
  isl_stat (*fn)(__isl_keep isl_schedule_node *node, void *user),
1479
  void *user)
1480
0
{
1481
0
  int i, n;
1482
0
1483
0
  if (!node)
1484
0
    return isl_stat_error;
1485
0
1486
0
  n = isl_schedule_node_get_tree_depth(node);
1487
0
  for (i = 0; i < n; ++i) {
1488
0
    isl_schedule_node *ancestor;
1489
0
    isl_stat r;
1490
0
1491
0
    ancestor = isl_schedule_node_copy(node);
1492
0
    ancestor = isl_schedule_node_ancestor(ancestor, n - i);
1493
0
    r = fn(ancestor, user);
1494
0
    isl_schedule_node_free(ancestor);
1495
0
    if (r < 0)
1496
0
      return isl_stat_error;
1497
0
  }
1498
0
1499
0
  return isl_stat_ok;
1500
0
}
1501
1502
/* Is any node in the subtree rooted at "node" anchored?
1503
 * That is, do any of these nodes reference the outer band nodes?
1504
 */
1505
isl_bool isl_schedule_node_is_subtree_anchored(
1506
  __isl_keep isl_schedule_node *node)
1507
3.96k
{
1508
3.96k
  if (!node)
1509
0
    return isl_bool_error;
1510
3.96k
  return isl_schedule_tree_is_subtree_anchored(node->tree);
1511
3.96k
}
1512
1513
/* Return the number of members in the given band node.
1514
 */
1515
unsigned isl_schedule_node_band_n_member(__isl_keep isl_schedule_node *node)
1516
1.72k
{
1517
1.72k
  return node ? isl_schedule_tree_band_n_member(node->tree) : 
00
;
1518
1.72k
}
1519
1520
/* Is the band member at position "pos" of the band node "node"
1521
 * marked coincident?
1522
 */
1523
isl_bool isl_schedule_node_band_member_get_coincident(
1524
  __isl_keep isl_schedule_node *node, int pos)
1525
824
{
1526
824
  if (!node)
1527
0
    return isl_bool_error;
1528
824
  return isl_schedule_tree_band_member_get_coincident(node->tree, pos);
1529
824
}
1530
1531
/* Mark the band member at position "pos" the band node "node"
1532
 * as being coincident or not according to "coincident".
1533
 */
1534
__isl_give isl_schedule_node *isl_schedule_node_band_member_set_coincident(
1535
  __isl_take isl_schedule_node *node, int pos, int coincident)
1536
275
{
1537
275
  int c;
1538
275
  isl_schedule_tree *tree;
1539
275
1540
275
  if (!node)
1541
0
    return NULL;
1542
275
  c = isl_schedule_node_band_member_get_coincident(node, pos);
1543
275
  if (c == coincident)
1544
88
    return node;
1545
187
1546
187
  tree = isl_schedule_tree_copy(node->tree);
1547
187
  tree = isl_schedule_tree_band_member_set_coincident(tree, pos,
1548
187
                  coincident);
1549
187
  node = isl_schedule_node_graft_tree(node, tree);
1550
187
1551
187
  return node;
1552
187
}
1553
1554
/* Is the band node "node" marked permutable?
1555
 */
1556
isl_bool isl_schedule_node_band_get_permutable(
1557
  __isl_keep isl_schedule_node *node)
1558
666
{
1559
666
  if (!node)
1560
0
    return isl_bool_error;
1561
666
1562
666
  return isl_schedule_tree_band_get_permutable(node->tree);
1563
666
}
1564
1565
/* Mark the band node "node" permutable or not according to "permutable"?
1566
 */
1567
__isl_give isl_schedule_node *isl_schedule_node_band_set_permutable(
1568
  __isl_take isl_schedule_node *node, int permutable)
1569
168
{
1570
168
  isl_schedule_tree *tree;
1571
168
1572
168
  if (!node)
1573
0
    return NULL;
1574
168
  if (isl_schedule_node_band_get_permutable(node) == permutable)
1575
26
    return node;
1576
142
1577
142
  tree = isl_schedule_tree_copy(node->tree);
1578
142
  tree = isl_schedule_tree_band_set_permutable(tree, permutable);
1579
142
  node = isl_schedule_node_graft_tree(node, tree);
1580
142
1581
142
  return node;
1582
142
}
1583
1584
/* Return the schedule space of the band node.
1585
 */
1586
__isl_give isl_space *isl_schedule_node_band_get_space(
1587
  __isl_keep isl_schedule_node *node)
1588
319
{
1589
319
  if (!node)
1590
0
    return NULL;
1591
319
1592
319
  return isl_schedule_tree_band_get_space(node->tree);
1593
319
}
1594
1595
/* Return the schedule of the band node in isolation.
1596
 */
1597
__isl_give isl_multi_union_pw_aff *isl_schedule_node_band_get_partial_schedule(
1598
  __isl_keep isl_schedule_node *node)
1599
761
{
1600
761
  if (!node)
1601
0
    return NULL;
1602
761
1603
761
  return isl_schedule_tree_band_get_partial_schedule(node->tree);
1604
761
}
1605
1606
/* Return the schedule of the band node in isolation in the form of
1607
 * an isl_union_map.
1608
 *
1609
 * If the band does not have any members, then we construct a universe map
1610
 * with the universe of the domain elements reaching the node as domain.
1611
 * Otherwise, we extract an isl_multi_union_pw_aff representation and
1612
 * convert that to an isl_union_map.
1613
 */
1614
__isl_give isl_union_map *isl_schedule_node_band_get_partial_schedule_union_map(
1615
  __isl_keep isl_schedule_node *node)
1616
37
{
1617
37
  isl_multi_union_pw_aff *mupa;
1618
37
1619
37
  if (!node)
1620
0
    return NULL;
1621
37
1622
37
  if (isl_schedule_node_get_type(node) != isl_schedule_node_band)
1623
37
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
1624
37
      "not a band node", return NULL);
1625
37
  if (isl_schedule_node_band_n_member(node) == 0) {
1626
0
    isl_union_set *domain;
1627
0
1628
0
    domain = isl_schedule_node_get_universe_domain(node);
1629
0
    return isl_union_map_from_domain(domain);
1630
0
  }
1631
37
1632
37
  mupa = isl_schedule_node_band_get_partial_schedule(node);
1633
37
  return isl_union_map_from_multi_union_pw_aff(mupa);
1634
37
}
1635
1636
/* Return the loop AST generation type for the band member of band node "node"
1637
 * at position "pos".
1638
 */
1639
enum isl_ast_loop_type isl_schedule_node_band_member_get_ast_loop_type(
1640
  __isl_keep isl_schedule_node *node, int pos)
1641
838
{
1642
838
  if (!node)
1643
0
    return isl_ast_loop_error;
1644
838
1645
838
  return isl_schedule_tree_band_member_get_ast_loop_type(node->tree, pos);
1646
838
}
1647
1648
/* Set the loop AST generation type for the band member of band node "node"
1649
 * at position "pos" to "type".
1650
 */
1651
__isl_give isl_schedule_node *isl_schedule_node_band_member_set_ast_loop_type(
1652
  __isl_take isl_schedule_node *node, int pos,
1653
  enum isl_ast_loop_type type)
1654
0
{
1655
0
  isl_schedule_tree *tree;
1656
0
1657
0
  if (!node)
1658
0
    return NULL;
1659
0
1660
0
  tree = isl_schedule_tree_copy(node->tree);
1661
0
  tree = isl_schedule_tree_band_member_set_ast_loop_type(tree, pos, type);
1662
0
  return isl_schedule_node_graft_tree(node, tree);
1663
0
}
1664
1665
/* Return the loop AST generation type for the band member of band node "node"
1666
 * at position "pos" for the isolated part.
1667
 */
1668
enum isl_ast_loop_type isl_schedule_node_band_member_get_isolate_ast_loop_type(
1669
  __isl_keep isl_schedule_node *node, int pos)
1670
917
{
1671
917
  if (!node)
1672
0
    return isl_ast_loop_error;
1673
917
1674
917
  return isl_schedule_tree_band_member_get_isolate_ast_loop_type(
1675
917
                  node->tree, pos);
1676
917
}
1677
1678
/* Set the loop AST generation type for the band member of band node "node"
1679
 * at position "pos" for the isolated part to "type".
1680
 */
1681
__isl_give isl_schedule_node *
1682
isl_schedule_node_band_member_set_isolate_ast_loop_type(
1683
  __isl_take isl_schedule_node *node, int pos,
1684
  enum isl_ast_loop_type type)
1685
0
{
1686
0
  isl_schedule_tree *tree;
1687
0
1688
0
  if (!node)
1689
0
    return NULL;
1690
0
1691
0
  tree = isl_schedule_tree_copy(node->tree);
1692
0
  tree = isl_schedule_tree_band_member_set_isolate_ast_loop_type(tree,
1693
0
                    pos, type);
1694
0
  return isl_schedule_node_graft_tree(node, tree);
1695
0
}
1696
1697
/* Return the AST build options associated to band node "node".
1698
 */
1699
__isl_give isl_union_set *isl_schedule_node_band_get_ast_build_options(
1700
  __isl_keep isl_schedule_node *node)
1701
0
{
1702
0
  if (!node)
1703
0
    return NULL;
1704
0
1705
0
  return isl_schedule_tree_band_get_ast_build_options(node->tree);
1706
0
}
1707
1708
/* Replace the AST build options associated to band node "node" by "options".
1709
 */
1710
__isl_give isl_schedule_node *isl_schedule_node_band_set_ast_build_options(
1711
  __isl_take isl_schedule_node *node, __isl_take isl_union_set *options)
1712
74
{
1713
74
  isl_schedule_tree *tree;
1714
74
1715
74
  if (!node || !options)
1716
0
    goto error;
1717
74
1718
74
  tree = isl_schedule_tree_copy(node->tree);
1719
74
  tree = isl_schedule_tree_band_set_ast_build_options(tree, options);
1720
74
  return isl_schedule_node_graft_tree(node, tree);
1721
0
error:
1722
0
  isl_schedule_node_free(node);
1723
0
  isl_union_set_free(options);
1724
0
  return NULL;
1725
74
}
1726
1727
/* Return the "isolate" option associated to band node "node".
1728
 */
1729
__isl_give isl_set *isl_schedule_node_band_get_ast_isolate_option(
1730
  __isl_keep isl_schedule_node *node)
1731
2.37k
{
1732
2.37k
  int depth;
1733
2.37k
1734
2.37k
  if (!node)
1735
0
    return NULL;
1736
2.37k
1737
2.37k
  depth = isl_schedule_node_get_schedule_depth(node);
1738
2.37k
  return isl_schedule_tree_band_get_ast_isolate_option(node->tree, depth);
1739
2.37k
}
1740
1741
/* Make sure that that spaces of "node" and "mv" are the same.
1742
 * Return -1 on error, reporting the error to the user.
1743
 */
1744
static int check_space_multi_val(__isl_keep isl_schedule_node *node,
1745
  __isl_keep isl_multi_val *mv)
1746
80
{
1747
80
  isl_space *node_space, *mv_space;
1748
80
  int equal;
1749
80
1750
80
  node_space = isl_schedule_node_band_get_space(node);
1751
80
  mv_space = isl_multi_val_get_space(mv);
1752
80
  equal = isl_space_tuple_is_equal(node_space, isl_dim_set,
1753
80
          mv_space, isl_dim_set);
1754
80
  isl_space_free(mv_space);
1755
80
  isl_space_free(node_space);
1756
80
  if (equal < 0)
1757
0
    return -1;
1758
80
  if (!equal)
1759
80
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
1760
80
      "spaces don't match", return -1);
1761
80
1762
80
  return 0;
1763
80
}
1764
1765
/* Multiply the partial schedule of the band node "node"
1766
 * with the factors in "mv".
1767
 */
1768
__isl_give isl_schedule_node *isl_schedule_node_band_scale(
1769
  __isl_take isl_schedule_node *node, __isl_take isl_multi_val *mv)
1770
0
{
1771
0
  isl_schedule_tree *tree;
1772
0
  int anchored;
1773
0
1774
0
  if (!node || !mv)
1775
0
    goto error;
1776
0
  if (check_space_multi_val(node, mv) < 0)
1777
0
    goto error;
1778
0
  anchored = isl_schedule_node_is_subtree_anchored(node);
1779
0
  if (anchored < 0)
1780
0
    goto error;
1781
0
  if (anchored)
1782
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
1783
0
      "cannot scale band node with anchored subtree",
1784
0
      goto error);
1785
0
1786
0
  tree = isl_schedule_node_get_tree(node);
1787
0
  tree = isl_schedule_tree_band_scale(tree, mv);
1788
0
  return isl_schedule_node_graft_tree(node, tree);
1789
0
error:
1790
0
  isl_multi_val_free(mv);
1791
0
  isl_schedule_node_free(node);
1792
0
  return NULL;
1793
0
}
1794
1795
/* Divide the partial schedule of the band node "node"
1796
 * by the factors in "mv".
1797
 */
1798
__isl_give isl_schedule_node *isl_schedule_node_band_scale_down(
1799
  __isl_take isl_schedule_node *node, __isl_take isl_multi_val *mv)
1800
0
{
1801
0
  isl_schedule_tree *tree;
1802
0
  int anchored;
1803
0
1804
0
  if (!node || !mv)
1805
0
    goto error;
1806
0
  if (check_space_multi_val(node, mv) < 0)
1807
0
    goto error;
1808
0
  anchored = isl_schedule_node_is_subtree_anchored(node);
1809
0
  if (anchored < 0)
1810
0
    goto error;
1811
0
  if (anchored)
1812
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
1813
0
      "cannot scale down band node with anchored subtree",
1814
0
      goto error);
1815
0
1816
0
  tree = isl_schedule_node_get_tree(node);
1817
0
  tree = isl_schedule_tree_band_scale_down(tree, mv);
1818
0
  return isl_schedule_node_graft_tree(node, tree);
1819
0
error:
1820
0
  isl_multi_val_free(mv);
1821
0
  isl_schedule_node_free(node);
1822
0
  return NULL;
1823
0
}
1824
1825
/* Reduce the partial schedule of the band node "node"
1826
 * modulo the factors in "mv".
1827
 */
1828
__isl_give isl_schedule_node *isl_schedule_node_band_mod(
1829
  __isl_take isl_schedule_node *node, __isl_take isl_multi_val *mv)
1830
0
{
1831
0
  isl_schedule_tree *tree;
1832
0
  isl_bool anchored;
1833
0
1834
0
  if (!node || !mv)
1835
0
    goto error;
1836
0
  if (check_space_multi_val(node, mv) < 0)
1837
0
    goto error;
1838
0
  anchored = isl_schedule_node_is_subtree_anchored(node);
1839
0
  if (anchored < 0)
1840
0
    goto error;
1841
0
  if (anchored)
1842
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
1843
0
      "cannot perform mod on band node with anchored subtree",
1844
0
      goto error);
1845
0
1846
0
  tree = isl_schedule_node_get_tree(node);
1847
0
  tree = isl_schedule_tree_band_mod(tree, mv);
1848
0
  return isl_schedule_node_graft_tree(node, tree);
1849
0
error:
1850
0
  isl_multi_val_free(mv);
1851
0
  isl_schedule_node_free(node);
1852
0
  return NULL;
1853
0
}
1854
1855
/* Make sure that that spaces of "node" and "mupa" are the same.
1856
 * Return isl_stat_error on error, reporting the error to the user.
1857
 */
1858
static isl_stat check_space_multi_union_pw_aff(
1859
  __isl_keep isl_schedule_node *node,
1860
  __isl_keep isl_multi_union_pw_aff *mupa)
1861
0
{
1862
0
  isl_space *node_space, *mupa_space;
1863
0
  isl_bool equal;
1864
0
1865
0
  node_space = isl_schedule_node_band_get_space(node);
1866
0
  mupa_space = isl_multi_union_pw_aff_get_space(mupa);
1867
0
  equal = isl_space_tuple_is_equal(node_space, isl_dim_set,
1868
0
          mupa_space, isl_dim_set);
1869
0
  isl_space_free(mupa_space);
1870
0
  isl_space_free(node_space);
1871
0
  if (equal < 0)
1872
0
    return isl_stat_error;
1873
0
  if (!equal)
1874
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
1875
0
      "spaces don't match", return isl_stat_error);
1876
0
1877
0
  return isl_stat_ok;
1878
0
}
1879
1880
/* Shift the partial schedule of the band node "node" by "shift".
1881
 */
1882
__isl_give isl_schedule_node *isl_schedule_node_band_shift(
1883
  __isl_take isl_schedule_node *node,
1884
  __isl_take isl_multi_union_pw_aff *shift)
1885
0
{
1886
0
  isl_schedule_tree *tree;
1887
0
  int anchored;
1888
0
1889
0
  if (!node || !shift)
1890
0
    goto error;
1891
0
  if (check_space_multi_union_pw_aff(node, shift) < 0)
1892
0
    goto error;
1893
0
  anchored = isl_schedule_node_is_subtree_anchored(node);
1894
0
  if (anchored < 0)
1895
0
    goto error;
1896
0
  if (anchored)
1897
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
1898
0
      "cannot shift band node with anchored subtree",
1899
0
      goto error);
1900
0
1901
0
  tree = isl_schedule_node_get_tree(node);
1902
0
  tree = isl_schedule_tree_band_shift(tree, shift);
1903
0
  return isl_schedule_node_graft_tree(node, tree);
1904
0
error:
1905
0
  isl_multi_union_pw_aff_free(shift);
1906
0
  isl_schedule_node_free(node);
1907
0
  return NULL;
1908
0
}
1909
1910
/* Tile "node" with tile sizes "sizes".
1911
 *
1912
 * The current node is replaced by two nested nodes corresponding
1913
 * to the tile dimensions and the point dimensions.
1914
 *
1915
 * Return a pointer to the outer (tile) node.
1916
 *
1917
 * If any of the descendants of "node" depend on the set of outer band nodes,
1918
 * then we refuse to tile the node.
1919
 *
1920
 * If the scale tile loops option is set, then the tile loops
1921
 * are scaled by the tile sizes.  If the shift point loops option is set,
1922
 * then the point loops are shifted to start at zero.
1923
 * In particular, these options affect the tile and point loop schedules
1924
 * as follows
1925
 *
1926
 *  scale shift original  tile    point
1927
 *
1928
 *  0 0 i   floor(i/s)  i
1929
 *  1 0 i   s * floor(i/s)  i
1930
 *  0 1 i   floor(i/s)  i - s * floor(i/s)
1931
 *  1 1 i   s * floor(i/s)  i - s * floor(i/s)
1932
 */
1933
__isl_give isl_schedule_node *isl_schedule_node_band_tile(
1934
  __isl_take isl_schedule_node *node, __isl_take isl_multi_val *sizes)
1935
80
{
1936
80
  isl_schedule_tree *tree;
1937
80
  int anchored;
1938
80
1939
80
  if (!node || !sizes)
1940
0
    goto error;
1941
80
  anchored = isl_schedule_node_is_subtree_anchored(node);
1942
80
  if (anchored < 0)
1943
0
    goto error;
1944
80
  if (anchored)
1945
80
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
1946
80
      "cannot tile band node with anchored subtree",
1947
80
      goto error);
1948
80
1949
80
  if (check_space_multi_val(node, sizes) < 0)
1950
0
    goto error;
1951
80
1952
80
  tree = isl_schedule_node_get_tree(node);
1953
80
  tree = isl_schedule_tree_band_tile(tree, sizes);
1954
80
  return isl_schedule_node_graft_tree(node, tree);
1955
0
error:
1956
0
  isl_multi_val_free(sizes);
1957
0
  isl_schedule_node_free(node);
1958
0
  return NULL;
1959
80
}
1960
1961
/* Move the band node "node" down to all the leaves in the subtree
1962
 * rooted at "node".
1963
 * Return a pointer to the node in the resulting tree that is in the same
1964
 * position as the node pointed to by "node" in the original tree.
1965
 *
1966
 * If the node only has a leaf child, then nothing needs to be done.
1967
 * Otherwise, the child of the node is removed and the result is
1968
 * appended to all the leaves in the subtree rooted at the original child.
1969
 * Since the node is moved to the leaves, it needs to be expanded
1970
 * according to the expansion, if any, defined by that subtree.
1971
 * In the end, the original node is replaced by the result of
1972
 * attaching copies of the expanded node to the leaves.
1973
 *
1974
 * If any of the nodes in the subtree rooted at "node" depend on
1975
 * the set of outer band nodes then we refuse to sink the band node.
1976
 */
1977
__isl_give isl_schedule_node *isl_schedule_node_band_sink(
1978
  __isl_take isl_schedule_node *node)
1979
17
{
1980
17
  enum isl_schedule_node_type type;
1981
17
  isl_schedule_tree *tree, *child;
1982
17
  isl_union_pw_multi_aff *contraction;
1983
17
  int anchored;
1984
17
1985
17
  if (!node)
1986
0
    return NULL;
1987
17
1988
17
  type = isl_schedule_node_get_type(node);
1989
17
  if (type != isl_schedule_node_band)
1990
17
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
1991
17
      "not a band node", return isl_schedule_node_free(node));
1992
17
  anchored = isl_schedule_node_is_subtree_anchored(node);
1993
17
  if (anchored < 0)
1994
0
    return isl_schedule_node_free(node);
1995
17
  if (anchored)
1996
17
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
1997
17
      "cannot sink band node in anchored subtree",
1998
17
      return isl_schedule_node_free(node));
1999
17
  if (isl_schedule_tree_n_children(node->tree) == 0)
2000
9
    return node;
2001
8
2002
8
  contraction = isl_schedule_node_get_subtree_contraction(node);
2003
8
2004
8
  tree = isl_schedule_node_get_tree(node);
2005
8
  child = isl_schedule_tree_get_child(tree, 0);
2006
8
  tree = isl_schedule_tree_reset_children(tree);
2007
8
  tree = isl_schedule_tree_pullback_union_pw_multi_aff(tree, contraction);
2008
8
  tree = isl_schedule_tree_append_to_leaves(child, tree);
2009
8
2010
8
  return isl_schedule_node_graft_tree(node, tree);
2011
8
}
2012
2013
/* Split "node" into two nested band nodes, one with the first "pos"
2014
 * dimensions and one with the remaining dimensions.
2015
 * The schedules of the two band nodes live in anonymous spaces.
2016
 * The loop AST generation type options and the isolate option
2017
 * are split over the two band nodes.
2018
 */
2019
__isl_give isl_schedule_node *isl_schedule_node_band_split(
2020
  __isl_take isl_schedule_node *node, int pos)
2021
36
{
2022
36
  int depth;
2023
36
  isl_schedule_tree *tree;
2024
36
2025
36
  depth = isl_schedule_node_get_schedule_depth(node);
2026
36
  tree = isl_schedule_node_get_tree(node);
2027
36
  tree = isl_schedule_tree_band_split(tree, pos, depth);
2028
36
  return isl_schedule_node_graft_tree(node, tree);
2029
36
}
2030
2031
/* Return the context of the context node "node".
2032
 */
2033
__isl_give isl_set *isl_schedule_node_context_get_context(
2034
  __isl_keep isl_schedule_node *node)
2035
0
{
2036
0
  if (!node)
2037
0
    return NULL;
2038
0
2039
0
  return isl_schedule_tree_context_get_context(node->tree);
2040
0
}
2041
2042
/* Return the domain of the domain node "node".
2043
 */
2044
__isl_give isl_union_set *isl_schedule_node_domain_get_domain(
2045
  __isl_keep isl_schedule_node *node)
2046
446
{
2047
446
  if (!node)
2048
0
    return NULL;
2049
446
2050
446
  return isl_schedule_tree_domain_get_domain(node->tree);
2051
446
}
2052
2053
/* Return the expansion map of expansion node "node".
2054
 */
2055
__isl_give isl_union_map *isl_schedule_node_expansion_get_expansion(
2056
  __isl_keep isl_schedule_node *node)
2057
2
{
2058
2
  if (!node)
2059
0
    return NULL;
2060
2
2061
2
  return isl_schedule_tree_expansion_get_expansion(node->tree);
2062
2
}
2063
2064
/* Return the contraction of expansion node "node".
2065
 */
2066
__isl_give isl_union_pw_multi_aff *isl_schedule_node_expansion_get_contraction(
2067
  __isl_keep isl_schedule_node *node)
2068
0
{
2069
0
  if (!node)
2070
0
    return NULL;
2071
0
2072
0
  return isl_schedule_tree_expansion_get_contraction(node->tree);
2073
0
}
2074
2075
/* Replace the contraction and the expansion of the expansion node "node"
2076
 * by "contraction" and "expansion".
2077
 */
2078
__isl_give isl_schedule_node *
2079
isl_schedule_node_expansion_set_contraction_and_expansion(
2080
  __isl_take isl_schedule_node *node,
2081
  __isl_take isl_union_pw_multi_aff *contraction,
2082
  __isl_take isl_union_map *expansion)
2083
0
{
2084
0
  isl_schedule_tree *tree;
2085
0
2086
0
  if (!node || !contraction || !expansion)
2087
0
    goto error;
2088
0
2089
0
  tree = isl_schedule_tree_copy(node->tree);
2090
0
  tree = isl_schedule_tree_expansion_set_contraction_and_expansion(tree,
2091
0
              contraction, expansion);
2092
0
  return isl_schedule_node_graft_tree(node, tree);
2093
0
error:
2094
0
  isl_schedule_node_free(node);
2095
0
  isl_union_pw_multi_aff_free(contraction);
2096
0
  isl_union_map_free(expansion);
2097
0
  return NULL;
2098
0
}
2099
2100
/* Return the extension of the extension node "node".
2101
 */
2102
__isl_give isl_union_map *isl_schedule_node_extension_get_extension(
2103
  __isl_keep isl_schedule_node *node)
2104
46
{
2105
46
  if (!node)
2106
0
    return NULL;
2107
46
2108
46
  return isl_schedule_tree_extension_get_extension(node->tree);
2109
46
}
2110
2111
/* Replace the extension of extension node "node" by "extension".
2112
 */
2113
__isl_give isl_schedule_node *isl_schedule_node_extension_set_extension(
2114
  __isl_take isl_schedule_node *node, __isl_take isl_union_map *extension)
2115
0
{
2116
0
  isl_schedule_tree *tree;
2117
0
2118
0
  if (!node || !extension)
2119
0
    goto error;
2120
0
2121
0
  tree = isl_schedule_tree_copy(node->tree);
2122
0
  tree = isl_schedule_tree_extension_set_extension(tree, extension);
2123
0
  return isl_schedule_node_graft_tree(node, tree);
2124
0
error:
2125
0
  isl_schedule_node_free(node);
2126
0
  isl_union_map_free(extension);
2127
0
  return NULL;
2128
0
}
2129
2130
/* Return the filter of the filter node "node".
2131
 */
2132
__isl_give isl_union_set *isl_schedule_node_filter_get_filter(
2133
  __isl_keep isl_schedule_node *node)
2134
3.08k
{
2135
3.08k
  if (!node)
2136
0
    return NULL;
2137
3.08k
2138
3.08k
  return isl_schedule_tree_filter_get_filter(node->tree);
2139
3.08k
}
2140
2141
/* Replace the filter of filter node "node" by "filter".
2142
 */
2143
__isl_give isl_schedule_node *isl_schedule_node_filter_set_filter(
2144
  __isl_take isl_schedule_node *node, __isl_take isl_union_set *filter)
2145
1.14k
{
2146
1.14k
  isl_schedule_tree *tree;
2147
1.14k
2148
1.14k
  if (!node || !filter)
2149
0
    goto error;
2150
1.14k
2151
1.14k
  tree = isl_schedule_tree_copy(node->tree);
2152
1.14k
  tree = isl_schedule_tree_filter_set_filter(tree, filter);
2153
1.14k
  return isl_schedule_node_graft_tree(node, tree);
2154
0
error:
2155
0
  isl_schedule_node_free(node);
2156
0
  isl_union_set_free(filter);
2157
0
  return NULL;
2158
1.14k
}
2159
2160
/* Intersect the filter of filter node "node" with "filter".
2161
 *
2162
 * If the filter of the node is already a subset of "filter",
2163
 * then leave the node unchanged.
2164
 */
2165
__isl_give isl_schedule_node *isl_schedule_node_filter_intersect_filter(
2166
  __isl_take isl_schedule_node *node, __isl_take isl_union_set *filter)
2167
0
{
2168
0
  isl_union_set *node_filter = NULL;
2169
0
  isl_bool subset;
2170
0
2171
0
  if (!node || !filter)
2172
0
    goto error;
2173
0
2174
0
  node_filter = isl_schedule_node_filter_get_filter(node);
2175
0
  subset = isl_union_set_is_subset(node_filter, filter);
2176
0
  if (subset < 0)
2177
0
    goto error;
2178
0
  if (subset) {
2179
0
    isl_union_set_free(node_filter);
2180
0
    isl_union_set_free(filter);
2181
0
    return node;
2182
0
  }
2183
0
  node_filter = isl_union_set_intersect(node_filter, filter);
2184
0
  node = isl_schedule_node_filter_set_filter(node, node_filter);
2185
0
  return node;
2186
0
error:
2187
0
  isl_schedule_node_free(node);
2188
0
  isl_union_set_free(node_filter);
2189
0
  isl_union_set_free(filter);
2190
0
  return NULL;
2191
0
}
2192
2193
/* Return the guard of the guard node "node".
2194
 */
2195
__isl_give isl_set *isl_schedule_node_guard_get_guard(
2196
  __isl_keep isl_schedule_node *node)
2197
0
{
2198
0
  if (!node)
2199
0
    return NULL;
2200
0
2201
0
  return isl_schedule_tree_guard_get_guard(node->tree);
2202
0
}
2203
2204
/* Return the mark identifier of the mark node "node".
2205
 */
2206
__isl_give isl_id *isl_schedule_node_mark_get_id(
2207
  __isl_keep isl_schedule_node *node)
2208
175
{
2209
175
  if (!node)
2210
0
    return NULL;
2211
175
2212
175
  return isl_schedule_tree_mark_get_id(node->tree);
2213
175
}
2214
2215
/* Replace the child at position "pos" of the sequence node "node"
2216
 * by the children of sequence root node of "tree".
2217
 */
2218
__isl_give isl_schedule_node *isl_schedule_node_sequence_splice(
2219
  __isl_take isl_schedule_node *node, int pos,
2220
  __isl_take isl_schedule_tree *tree)
2221
0
{
2222
0
  isl_schedule_tree *node_tree;
2223
0
2224
0
  if (!node || !tree)
2225
0
    goto error;
2226
0
  if (isl_schedule_node_get_type(node) != isl_schedule_node_sequence)
2227
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
2228
0
      "not a sequence node", goto error);
2229
0
  if (isl_schedule_tree_get_type(tree) != isl_schedule_node_sequence)
2230
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
2231
0
      "not a sequence node", goto error);
2232
0
  node_tree = isl_schedule_node_get_tree(node);
2233
0
  node_tree = isl_schedule_tree_sequence_splice(node_tree, pos, tree);
2234
0
  node = isl_schedule_node_graft_tree(node, node_tree);
2235
0
2236
0
  return node;
2237
0
error:
2238
0
  isl_schedule_node_free(node);
2239
0
  isl_schedule_tree_free(tree);
2240
0
  return NULL;
2241
0
}
2242
2243
/* Given a sequence node "node", with a child at position "pos" that
2244
 * is also a sequence node, attach the children of that node directly
2245
 * as children of "node" at that position, replacing the original child.
2246
 *
2247
 * The filters of these children are intersected with the filter
2248
 * of the child at position "pos".
2249
 */
2250
__isl_give isl_schedule_node *isl_schedule_node_sequence_splice_child(
2251
  __isl_take isl_schedule_node *node, int pos)
2252
0
{
2253
0
  int i, n;
2254
0
  isl_union_set *filter;
2255
0
  isl_schedule_node *child;
2256
0
  isl_schedule_tree *tree;
2257
0
2258
0
  if (!node)
2259
0
    return NULL;
2260
0
  if (isl_schedule_node_get_type(node) != isl_schedule_node_sequence)
2261
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
2262
0
      "not a sequence node",
2263
0
      return isl_schedule_node_free(node));
2264
0
  node = isl_schedule_node_child(node, pos);
2265
0
  node = isl_schedule_node_child(node, 0);
2266
0
  if (isl_schedule_node_get_type(node) != isl_schedule_node_sequence)
2267
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
2268
0
      "not a sequence node",
2269
0
      return isl_schedule_node_free(node));
2270
0
  child = isl_schedule_node_copy(node);
2271
0
  node = isl_schedule_node_parent(node);
2272
0
  filter = isl_schedule_node_filter_get_filter(node);
2273
0
  n = isl_schedule_node_n_children(child);
2274
0
  for (i = 0; i < n; ++i) {
2275
0
    child = isl_schedule_node_child(child, i);
2276
0
    child = isl_schedule_node_filter_intersect_filter(child,
2277
0
            isl_union_set_copy(filter));
2278
0
    child = isl_schedule_node_parent(child);
2279
0
  }
2280
0
  isl_union_set_free(filter);
2281
0
  tree = isl_schedule_node_get_tree(child);
2282
0
  isl_schedule_node_free(child);
2283
0
  node = isl_schedule_node_parent(node);
2284
0
  node = isl_schedule_node_sequence_splice(node, pos, tree);
2285
0
2286
0
  return node;
2287
0
}
2288
2289
/* Update the ancestors of "node" to point to the tree that "node"
2290
 * now points to.
2291
 * That is, replace the child in the original parent that corresponds
2292
 * to the current tree position by node->tree and continue updating
2293
 * the ancestors in the same way until the root is reached.
2294
 *
2295
 * If "fn" is not NULL, then it is called on each ancestor as we move up
2296
 * the tree so that it can modify the ancestor before it is added
2297
 * to the list of ancestors of the modified node.
2298
 * The additional "pos" argument records the position
2299
 * of the "tree" argument in the original schedule tree.
2300
 *
2301
 * If "node" originally points to a leaf of the schedule tree, then make sure
2302
 * that in the end it points to a leaf in the updated schedule tree.
2303
 */
2304
static __isl_give isl_schedule_node *update_ancestors(
2305
  __isl_take isl_schedule_node *node,
2306
  __isl_give isl_schedule_tree *(*fn)(__isl_take isl_schedule_tree *tree,
2307
    __isl_keep isl_schedule_node *pos, void *user), void *user)
2308
7.12k
{
2309
7.12k
  int i, n;
2310
7.12k
  int is_leaf;
2311
7.12k
  isl_schedule_tree *tree;
2312
7.12k
  isl_schedule_node *pos = NULL;
2313
7.12k
2314
7.12k
  if (fn)
2315
3
    pos = isl_schedule_node_copy(node);
2316
7.12k
2317
7.12k
  node = isl_schedule_node_cow(node);
2318
7.12k
  if (!node)
2319
0
    return isl_schedule_node_free(pos);
2320
7.12k
2321
7.12k
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
2322
7.12k
  tree = isl_schedule_tree_copy(node->tree);
2323
7.12k
2324
19.9k
  for (i = n - 1; i >= 0; 
--i12.7k
) {
2325
12.7k
    isl_schedule_tree *parent;
2326
12.7k
2327
12.7k
    parent = isl_schedule_tree_list_get_schedule_tree(
2328
12.7k
                node->ancestors, i);
2329
12.7k
    parent = isl_schedule_tree_replace_child(parent,
2330
12.7k
                node->child_pos[i], tree);
2331
12.7k
    if (fn) {
2332
9
      pos = isl_schedule_node_parent(pos);
2333
9
      parent = fn(parent, pos, user);
2334
9
    }
2335
12.7k
    node->ancestors = isl_schedule_tree_list_set_schedule_tree(
2336
12.7k
          node->ancestors, i, isl_schedule_tree_copy(parent));
2337
12.7k
2338
12.7k
    tree = parent;
2339
12.7k
  }
2340
7.12k
2341
7.12k
  if (fn)
2342
3
    isl_schedule_node_free(pos);
2343
7.12k
2344
7.12k
  is_leaf = isl_schedule_tree_is_leaf(node->tree);
2345
7.12k
  node->schedule = isl_schedule_set_root(node->schedule, tree);
2346
7.12k
  if (is_leaf) {
2347
206
    isl_schedule_tree_free(node->tree);
2348
206
    node->tree = isl_schedule_node_get_leaf(node);
2349
206
  }
2350
7.12k
2351
7.12k
  if (!node->schedule || !node->ancestors)
2352
0
    return isl_schedule_node_free(node);
2353
7.12k
2354
7.12k
  return node;
2355
7.12k
}
2356
2357
/* Replace the subtree that "pos" points to by "tree", updating
2358
 * the ancestors to maintain a consistent state.
2359
 */
2360
__isl_give isl_schedule_node *isl_schedule_node_graft_tree(
2361
  __isl_take isl_schedule_node *pos, __isl_take isl_schedule_tree *tree)
2362
7.97k
{
2363
7.97k
  if (!tree || !pos)
2364
0
    goto error;
2365
7.97k
  if (pos->tree == tree) {
2366
845
    isl_schedule_tree_free(tree);
2367
845
    return pos;
2368
845
  }
2369
7.12k
2370
7.12k
  pos = isl_schedule_node_cow(pos);
2371
7.12k
  if (!pos)
2372
0
    goto error;
2373
7.12k
2374
7.12k
  isl_schedule_tree_free(pos->tree);
2375
7.12k
  pos->tree = tree;
2376
7.12k
2377
7.12k
  return update_ancestors(pos, NULL, NULL);
2378
0
error:
2379
0
  isl_schedule_node_free(pos);
2380
0
  isl_schedule_tree_free(tree);
2381
0
  return NULL;
2382
7.12k
}
2383
2384
/* Make sure we can insert a node between "node" and its parent.
2385
 * Return -1 on error, reporting the reason why we cannot insert a node.
2386
 */
2387
static int check_insert(__isl_keep isl_schedule_node *node)
2388
2.27k
{
2389
2.27k
  int has_parent;
2390
2.27k
  enum isl_schedule_node_type type;
2391
2.27k
2392
2.27k
  has_parent = isl_schedule_node_has_parent(node);
2393
2.27k
  if (has_parent < 0)
2394
0
    return -1;
2395
2.27k
  if (!has_parent)
2396
2.27k
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
2397
2.27k
      "cannot insert node outside of root", return -1);
2398
2.27k
2399
2.27k
  type = isl_schedule_node_get_parent_type(node);
2400
2.27k
  if (type == isl_schedule_node_error)
2401
0
    return -1;
2402
2.27k
  if (type == isl_schedule_node_set || type == isl_schedule_node_sequence)
2403
2.27k
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
2404
2.27k
      "cannot insert node between set or sequence node "
2405
2.27k
      "and its filter children", return -1);
2406
2.27k
2407
2.27k
  return 0;
2408
2.27k
}
2409
2410
/* Insert a band node with partial schedule "mupa" between "node" and
2411
 * its parent.
2412
 * Return a pointer to the new band node.
2413
 *
2414
 * If any of the nodes in the subtree rooted at "node" depend on
2415
 * the set of outer band nodes then we refuse to insert the band node.
2416
 */
2417
__isl_give isl_schedule_node *isl_schedule_node_insert_partial_schedule(
2418
  __isl_take isl_schedule_node *node,
2419
  __isl_take isl_multi_union_pw_aff *mupa)
2420
1.98k
{
2421
1.98k
  int anchored;
2422
1.98k
  isl_schedule_band *band;
2423
1.98k
  isl_schedule_tree *tree;
2424
1.98k
2425
1.98k
  if (check_insert(node) < 0)
2426
0
    node = isl_schedule_node_free(node);
2427
1.98k
  anchored = isl_schedule_node_is_subtree_anchored(node);
2428
1.98k
  if (anchored < 0)
2429
0
    goto error;
2430
1.98k
  if (anchored)
2431
1.98k
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
2432
1.98k
      "cannot insert band node in anchored subtree",
2433
1.98k
      goto error);
2434
1.98k
2435
1.98k
  tree = isl_schedule_node_get_tree(node);
2436
1.98k
  band = isl_schedule_band_from_multi_union_pw_aff(mupa);
2437
1.98k
  tree = isl_schedule_tree_insert_band(tree, band);
2438
1.98k
  node = isl_schedule_node_graft_tree(node, tree);
2439
1.98k
2440
1.98k
  return node;
2441
0
error:
2442
0
  isl_schedule_node_free(node);
2443
0
  isl_multi_union_pw_aff_free(mupa);
2444
0
  return NULL;
2445
1.98k
}
2446
2447
/* Insert a context node with context "context" between "node" and its parent.
2448
 * Return a pointer to the new context node.
2449
 */
2450
__isl_give isl_schedule_node *isl_schedule_node_insert_context(
2451
  __isl_take isl_schedule_node *node, __isl_take isl_set *context)
2452
0
{
2453
0
  isl_schedule_tree *tree;
2454
0
2455
0
  if (check_insert(node) < 0)
2456
0
    node = isl_schedule_node_free(node);
2457
0
2458
0
  tree = isl_schedule_node_get_tree(node);
2459
0
  tree = isl_schedule_tree_insert_context(tree, context);
2460
0
  node = isl_schedule_node_graft_tree(node, tree);
2461
0
2462
0
  return node;
2463
0
}
2464
2465
/* Insert an expansion node with the given "contraction" and "expansion"
2466
 * between "node" and its parent.
2467
 * Return a pointer to the new expansion node.
2468
 *
2469
 * Typically the domain and range spaces of the expansion are different.
2470
 * This means that only one of them can refer to the current domain space
2471
 * in a consistent tree.  It is up to the caller to ensure that the tree
2472
 * returns to a consistent state.
2473
 */
2474
__isl_give isl_schedule_node *isl_schedule_node_insert_expansion(
2475
  __isl_take isl_schedule_node *node,
2476
  __isl_take isl_union_pw_multi_aff *contraction,
2477
  __isl_take isl_union_map *expansion)
2478
3
{
2479
3
  isl_schedule_tree *tree;
2480
3
2481
3
  if (check_insert(node) < 0)
2482
0
    node = isl_schedule_node_free(node);
2483
3
2484
3
  tree = isl_schedule_node_get_tree(node);
2485
3
  tree = isl_schedule_tree_insert_expansion(tree, contraction, expansion);
2486
3
  node = isl_schedule_node_graft_tree(node, tree);
2487
3
2488
3
  return node;
2489
3
}
2490
2491
/* Insert an extension node with extension "extension" between "node" and
2492
 * its parent.
2493
 * Return a pointer to the new extension node.
2494
 */
2495
__isl_give isl_schedule_node *isl_schedule_node_insert_extension(
2496
  __isl_take isl_schedule_node *node,
2497
  __isl_take isl_union_map *extension)
2498
24
{
2499
24
  isl_schedule_tree *tree;
2500
24
2501
24
  tree = isl_schedule_node_get_tree(node);
2502
24
  tree = isl_schedule_tree_insert_extension(tree, extension);
2503
24
  node = isl_schedule_node_graft_tree(node, tree);
2504
24
2505
24
  return node;
2506
24
}
2507
2508
/* Insert a filter node with filter "filter" between "node" and its parent.
2509
 * Return a pointer to the new filter node.
2510
 */
2511
__isl_give isl_schedule_node *isl_schedule_node_insert_filter(
2512
  __isl_take isl_schedule_node *node, __isl_take isl_union_set *filter)
2513
24
{
2514
24
  isl_schedule_tree *tree;
2515
24
2516
24
  if (check_insert(node) < 0)
2517
0
    node = isl_schedule_node_free(node);
2518
24
2519
24
  tree = isl_schedule_node_get_tree(node);
2520
24
  tree = isl_schedule_tree_insert_filter(tree, filter);
2521
24
  node = isl_schedule_node_graft_tree(node, tree);
2522
24
2523
24
  return node;
2524
24
}
2525
2526
/* Insert a guard node with guard "guard" between "node" and its parent.
2527
 * Return a pointer to the new guard node.
2528
 */
2529
__isl_give isl_schedule_node *isl_schedule_node_insert_guard(
2530
  __isl_take isl_schedule_node *node, __isl_take isl_set *guard)
2531
0
{
2532
0
  isl_schedule_tree *tree;
2533
0
2534
0
  if (check_insert(node) < 0)
2535
0
    node = isl_schedule_node_free(node);
2536
0
2537
0
  tree = isl_schedule_node_get_tree(node);
2538
0
  tree = isl_schedule_tree_insert_guard(tree, guard);
2539
0
  node = isl_schedule_node_graft_tree(node, tree);
2540
0
2541
0
  return node;
2542
0
}
2543
2544
/* Insert a mark node with mark identifier "mark" between "node" and
2545
 * its parent.
2546
 * Return a pointer to the new mark node.
2547
 */
2548
__isl_give isl_schedule_node *isl_schedule_node_insert_mark(
2549
  __isl_take isl_schedule_node *node, __isl_take isl_id *mark)
2550
161
{
2551
161
  isl_schedule_tree *tree;
2552
161
2553
161
  if (check_insert(node) < 0)
2554
0
    node = isl_schedule_node_free(node);
2555
161
2556
161
  tree = isl_schedule_node_get_tree(node);
2557
161
  tree = isl_schedule_tree_insert_mark(tree, mark);
2558
161
  node = isl_schedule_node_graft_tree(node, tree);
2559
161
2560
161
  return node;
2561
161
}
2562
2563
/* Attach the current subtree of "node" to a sequence of filter tree nodes
2564
 * with filters described by "filters", attach this sequence
2565
 * of filter tree nodes as children to a new tree of type "type" and
2566
 * replace the original subtree of "node" by this new tree.
2567
 * Each copy of the original subtree is simplified with respect
2568
 * to the corresponding filter.
2569
 */
2570
static __isl_give isl_schedule_node *isl_schedule_node_insert_children(
2571
  __isl_take isl_schedule_node *node,
2572
  enum isl_schedule_node_type type,
2573
  __isl_take isl_union_set_list *filters)
2574
73
{
2575
73
  int i, n;
2576
73
  isl_ctx *ctx;
2577
73
  isl_schedule_tree *tree;
2578
73
  isl_schedule_tree_list *list;
2579
73
2580
73
  if (check_insert(node) < 0)
2581
0
    node = isl_schedule_node_free(node);
2582
73
2583
73
  if (!node || !filters)
2584
0
    goto error;
2585
73
2586
73
  ctx = isl_schedule_node_get_ctx(node);
2587
73
  n = isl_union_set_list_n_union_set(filters);
2588
73
  list = isl_schedule_tree_list_alloc(ctx, n);
2589
231
  for (i = 0; i < n; 
++i158
) {
2590
158
    isl_schedule_node *node_i;
2591
158
    isl_schedule_tree *tree;
2592
158
    isl_union_set *filter;
2593
158
2594
158
    filter = isl_union_set_list_get_union_set(filters, i);
2595
158
    node_i = isl_schedule_node_copy(node);
2596
158
    node_i = isl_schedule_node_gist(node_i,
2597
158
            isl_union_set_copy(filter));
2598
158
    tree = isl_schedule_node_get_tree(node_i);
2599
158
    isl_schedule_node_free(node_i);
2600
158
    tree = isl_schedule_tree_insert_filter(tree, filter);
2601
158
    list = isl_schedule_tree_list_add(list, tree);
2602
158
  }
2603
73
  tree = isl_schedule_tree_from_children(type, list);
2604
73
  node = isl_schedule_node_graft_tree(node, tree);
2605
73
2606
73
  isl_union_set_list_free(filters);
2607
73
  return node;
2608
0
error:
2609
0
  isl_union_set_list_free(filters);
2610
0
  isl_schedule_node_free(node);
2611
0
  return NULL;
2612
73
}
2613
2614
/* Insert a sequence node with child filters "filters" between "node" and
2615
 * its parent.  That is, the tree that "node" points to is attached
2616
 * to each of the child nodes of the filter nodes.
2617
 * Return a pointer to the new sequence node.
2618
 */
2619
__isl_give isl_schedule_node *isl_schedule_node_insert_sequence(
2620
  __isl_take isl_schedule_node *node,
2621
  __isl_take isl_union_set_list *filters)
2622
60
{
2623
60
  return isl_schedule_node_insert_children(node,
2624
60
          isl_schedule_node_sequence, filters);
2625
60
}
2626
2627
/* Insert a set node with child filters "filters" between "node" and
2628
 * its parent.  That is, the tree that "node" points to is attached
2629
 * to each of the child nodes of the filter nodes.
2630
 * Return a pointer to the new set node.
2631
 */
2632
__isl_give isl_schedule_node *isl_schedule_node_insert_set(
2633
  __isl_take isl_schedule_node *node,
2634
  __isl_take isl_union_set_list *filters)
2635
13
{
2636
13
  return isl_schedule_node_insert_children(node,
2637
13
          isl_schedule_node_set, filters);
2638
13
}
2639
2640
/* Remove "node" from its schedule tree and return a pointer
2641
 * to the leaf at the same position in the updated schedule tree.
2642
 *
2643
 * It is not allowed to remove the root of a schedule tree or
2644
 * a child of a set or sequence node.
2645
 */
2646
__isl_give isl_schedule_node *isl_schedule_node_cut(
2647
  __isl_take isl_schedule_node *node)
2648
452
{
2649
452
  isl_schedule_tree *leaf;
2650
452
  enum isl_schedule_node_type parent_type;
2651
452
2652
452
  if (!node)
2653
0
    return NULL;
2654
452
  if (!isl_schedule_node_has_parent(node))
2655
452
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
2656
452
      "cannot cut root", return isl_schedule_node_free(node));
2657
452
2658
452
  parent_type = isl_schedule_node_get_parent_type(node);
2659
452
  if (parent_type == isl_schedule_node_set ||
2660
452
      parent_type == isl_schedule_node_sequence)
2661
452
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
2662
452
      "cannot cut child of set or sequence",
2663
452
      return isl_schedule_node_free(node));
2664
452
2665
452
  leaf = isl_schedule_node_get_leaf(node);
2666
452
  return isl_schedule_node_graft_tree(node, leaf);
2667
452
}
2668
2669
/* Remove a single node from the schedule tree, attaching the child
2670
 * of "node" directly to its parent.
2671
 * Return a pointer to this former child or to the leaf the position
2672
 * of the original node if there was no child.
2673
 * It is not allowed to remove the root of a schedule tree,
2674
 * a set or sequence node, a child of a set or sequence node or
2675
 * a band node with an anchored subtree.
2676
 */
2677
__isl_give isl_schedule_node *isl_schedule_node_delete(
2678
  __isl_take isl_schedule_node *node)
2679
446
{
2680
446
  int n;
2681
446
  isl_schedule_tree *tree;
2682
446
  enum isl_schedule_node_type type;
2683
446
2684
446
  if (!node)
2685
0
    return NULL;
2686
446
2687
446
  if (isl_schedule_node_get_tree_depth(node) == 0)
2688
446
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
2689
446
      "cannot delete root node",
2690
446
      return isl_schedule_node_free(node));
2691
446
  n = isl_schedule_node_n_children(node);
2692
446
  if (n != 1)
2693
446
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
2694
446
      "can only delete node with a single child",
2695
446
      return isl_schedule_node_free(node));
2696
446
  type = isl_schedule_node_get_parent_type(node);
2697
446
  if (type == isl_schedule_node_sequence || type == isl_schedule_node_set)
2698
446
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
2699
446
      "cannot delete child of set or sequence",
2700
446
      return isl_schedule_node_free(node));
2701
446
  if (isl_schedule_node_get_type(node) == isl_schedule_node_band) {
2702
94
    int anchored;
2703
94
2704
94
    anchored = isl_schedule_node_is_subtree_anchored(node);
2705
94
    if (anchored < 0)
2706
0
      return isl_schedule_node_free(node);
2707
94
    if (anchored)
2708
94
      
isl_die0
(isl_schedule_node_get_ctx(node),
2709
94
        isl_error_invalid,
2710
94
        "cannot delete band node with anchored subtree",
2711
94
        return isl_schedule_node_free(node));
2712
94
  }
2713
446
2714
446
  tree = isl_schedule_node_get_tree(node);
2715
446
  if (!tree || isl_schedule_tree_has_children(tree)) {
2716
280
    tree = isl_schedule_tree_child(tree, 0);
2717
280
  } else {
2718
166
    isl_schedule_tree_free(tree);
2719
166
    tree = isl_schedule_node_get_leaf(node);
2720
166
  }
2721
446
  node = isl_schedule_node_graft_tree(node, tree);
2722
446
2723
446
  return node;
2724
446
}
2725
2726
/* Internal data structure for the group_ancestor callback.
2727
 *
2728
 * If "finished" is set, then we no longer need to modify
2729
 * any further ancestors.
2730
 *
2731
 * "contraction" and "expansion" represent the expansion
2732
 * that reflects the grouping.
2733
 *
2734
 * "domain" contains the domain elements that reach the position
2735
 * where the grouping is performed.  That is, it is the range
2736
 * of the resulting expansion.
2737
 * "domain_universe" is the universe of "domain".
2738
 * "group" is the set of group elements, i.e., the domain
2739
 * of the resulting expansion.
2740
 * "group_universe" is the universe of "group".
2741
 *
2742
 * "sched" is the schedule for the group elements, in pratice
2743
 * an identity mapping on "group_universe".
2744
 * "dim" is the dimension of "sched".
2745
 */
2746
struct isl_schedule_group_data {
2747
  int finished;
2748
2749
  isl_union_map *expansion;
2750
  isl_union_pw_multi_aff *contraction;
2751
2752
  isl_union_set *domain;
2753
  isl_union_set *domain_universe;
2754
  isl_union_set *group;
2755
  isl_union_set *group_universe;
2756
2757
  int dim;
2758
  isl_multi_aff *sched;
2759
};
2760
2761
/* Is domain covered by data->domain within data->domain_universe?
2762
 */
2763
static int locally_covered_by_domain(__isl_keep isl_union_set *domain,
2764
  struct isl_schedule_group_data *data)
2765
3
{
2766
3
  int is_subset;
2767
3
  isl_union_set *test;
2768
3
2769
3
  test = isl_union_set_copy(domain);
2770
3
  test = isl_union_set_intersect(test,
2771
3
          isl_union_set_copy(data->domain_universe));
2772
3
  is_subset = isl_union_set_is_subset(test, data->domain);
2773
3
  isl_union_set_free(test);
2774
3
2775
3
  return is_subset;
2776
3
}
2777
2778
/* Update the band tree root "tree" to refer to the group instances
2779
 * in data->group rather than the original domain elements in data->domain.
2780
 * "pos" is the position in the original schedule tree where the modified
2781
 * "tree" will be attached.
2782
 *
2783
 * Add the part of the identity schedule on the group instances data->sched
2784
 * that corresponds to this band node to the band schedule.
2785
 * If the domain elements that reach the node and that are part
2786
 * of data->domain_universe are all elements of data->domain (and therefore
2787
 * replaced by the group instances) then this data->domain_universe
2788
 * is removed from the domain of the band schedule.
2789
 */
2790
static __isl_give isl_schedule_tree *group_band(
2791
  __isl_take isl_schedule_tree *tree, __isl_keep isl_schedule_node *pos,
2792
  struct isl_schedule_group_data *data)
2793
2
{
2794
2
  isl_union_set *domain;
2795
2
  isl_multi_aff *ma;
2796
2
  isl_multi_union_pw_aff *mupa, *partial;
2797
2
  int is_covered;
2798
2
  int depth, n, has_id;
2799
2
2800
2
  domain = isl_schedule_node_get_domain(pos);
2801
2
  is_covered = locally_covered_by_domain(domain, data);
2802
2
  if (is_covered >= 0 && is_covered) {
2803
2
    domain = isl_union_set_universe(domain);
2804
2
    domain = isl_union_set_subtract(domain,
2805
2
          isl_union_set_copy(data->domain_universe));
2806
2
    tree = isl_schedule_tree_band_intersect_domain(tree, domain);
2807
2
  } else
2808
0
    isl_union_set_free(domain);
2809
2
  if (is_covered < 0)
2810
0
    return isl_schedule_tree_free(tree);
2811
2
  depth = isl_schedule_node_get_schedule_depth(pos);
2812
2
  n = isl_schedule_tree_band_n_member(tree);
2813
2
  ma = isl_multi_aff_copy(data->sched);
2814
2
  ma = isl_multi_aff_drop_dims(ma, isl_dim_out, 0, depth);
2815
2
  ma = isl_multi_aff_drop_dims(ma, isl_dim_out, n, data->dim - depth - n);
2816
2
  mupa = isl_multi_union_pw_aff_from_multi_aff(ma);
2817
2
  partial = isl_schedule_tree_band_get_partial_schedule(tree);
2818
2
  has_id = isl_multi_union_pw_aff_has_tuple_id(partial, isl_dim_set);
2819
2
  if (has_id < 0) {
2820
0
    partial = isl_multi_union_pw_aff_free(partial);
2821
2
  } else if (has_id) {
2822
0
    isl_id *id;
2823
0
    id = isl_multi_union_pw_aff_get_tuple_id(partial, isl_dim_set);
2824
0
    mupa = isl_multi_union_pw_aff_set_tuple_id(mupa,
2825
0
                  isl_dim_set, id);
2826
0
  }
2827
2
  partial = isl_multi_union_pw_aff_union_add(partial, mupa);
2828
2
  tree = isl_schedule_tree_band_set_partial_schedule(tree, partial);
2829
2
2830
2
  return tree;
2831
2
}
2832
2833
/* Drop the parameters in "uset" that are not also in "space".
2834
 * "n" is the number of parameters in "space".
2835
 */
2836
static __isl_give isl_union_set *union_set_drop_extra_params(
2837
  __isl_take isl_union_set *uset, __isl_keep isl_space *space, int n)
2838
0
{
2839
0
  int n2;
2840
0
2841
0
  uset = isl_union_set_align_params(uset, isl_space_copy(space));
2842
0
  n2 = isl_union_set_dim(uset, isl_dim_param);
2843
0
  uset = isl_union_set_project_out(uset, isl_dim_param, n, n2 - n);
2844
0
2845
0
  return uset;
2846
0
}
2847
2848
/* Update the context tree root "tree" to refer to the group instances
2849
 * in data->group rather than the original domain elements in data->domain.
2850
 * "pos" is the position in the original schedule tree where the modified
2851
 * "tree" will be attached.
2852
 *
2853
 * We do not actually need to update "tree" since a context node only
2854
 * refers to the schedule space.  However, we may need to update "data"
2855
 * to not refer to any parameters introduced by the context node.
2856
 */
2857
static __isl_give isl_schedule_tree *group_context(
2858
  __isl_take isl_schedule_tree *tree, __isl_keep isl_schedule_node *pos,
2859
  struct isl_schedule_group_data *data)
2860
0
{
2861
0
  isl_space *space;
2862
0
  isl_union_set *domain;
2863
0
  int n1, n2;
2864
0
  int involves;
2865
0
2866
0
  if (isl_schedule_node_get_tree_depth(pos) == 1)
2867
0
    return tree;
2868
0
2869
0
  domain = isl_schedule_node_get_universe_domain(pos);
2870
0
  space = isl_union_set_get_space(domain);
2871
0
  isl_union_set_free(domain);
2872
0
2873
0
  n1 = isl_space_dim(space, isl_dim_param);
2874
0
  data->expansion = isl_union_map_align_params(data->expansion, space);
2875
0
  n2 = isl_union_map_dim(data->expansion, isl_dim_param);
2876
0
2877
0
  if (!data->expansion)
2878
0
    return isl_schedule_tree_free(tree);
2879
0
  if (n1 == n2)
2880
0
    return tree;
2881
0
2882
0
  involves = isl_union_map_involves_dims(data->expansion,
2883
0
        isl_dim_param, n1, n2 - n1);
2884
0
  if (involves < 0)
2885
0
    return isl_schedule_tree_free(tree);
2886
0
  if (involves)
2887
0
    isl_die(isl_schedule_node_get_ctx(pos), isl_error_invalid,
2888
0
      "grouping cannot only refer to global parameters",
2889
0
      return isl_schedule_tree_free(tree));
2890
0
2891
0
  data->expansion = isl_union_map_project_out(data->expansion,
2892
0
        isl_dim_param, n1, n2 - n1);
2893
0
  space = isl_union_map_get_space(data->expansion);
2894
0
2895
0
  data->contraction = isl_union_pw_multi_aff_align_params(
2896
0
        data->contraction, isl_space_copy(space));
2897
0
  n2 = isl_union_pw_multi_aff_dim(data->contraction, isl_dim_param);
2898
0
  data->contraction = isl_union_pw_multi_aff_drop_dims(data->contraction,
2899
0
        isl_dim_param, n1, n2 - n1);
2900
0
2901
0
  data->domain = union_set_drop_extra_params(data->domain, space, n1);
2902
0
  data->domain_universe =
2903
0
    union_set_drop_extra_params(data->domain_universe, space, n1);
2904
0
  data->group = union_set_drop_extra_params(data->group, space, n1);
2905
0
  data->group_universe =
2906
0
    union_set_drop_extra_params(data->group_universe, space, n1);
2907
0
2908
0
  data->sched = isl_multi_aff_align_params(data->sched,
2909
0
        isl_space_copy(space));
2910
0
  n2 = isl_multi_aff_dim(data->sched, isl_dim_param);
2911
0
  data->sched = isl_multi_aff_drop_dims(data->sched,
2912
0
        isl_dim_param, n1, n2 - n1);
2913
0
2914
0
  isl_space_free(space);
2915
0
2916
0
  return tree;
2917
0
}
2918
2919
/* Update the domain tree root "tree" to refer to the group instances
2920
 * in data->group rather than the original domain elements in data->domain.
2921
 * "pos" is the position in the original schedule tree where the modified
2922
 * "tree" will be attached.
2923
 *
2924
 * We first double-check that all grouped domain elements are actually
2925
 * part of the root domain and then replace those elements by the group
2926
 * instances.
2927
 */
2928
static __isl_give isl_schedule_tree *group_domain(
2929
  __isl_take isl_schedule_tree *tree, __isl_keep isl_schedule_node *pos,
2930
  struct isl_schedule_group_data *data)
2931
2
{
2932
2
  isl_union_set *domain;
2933
2
  int is_subset;
2934
2
2935
2
  domain = isl_schedule_tree_domain_get_domain(tree);
2936
2
  is_subset = isl_union_set_is_subset(data->domain, domain);
2937
2
  isl_union_set_free(domain);
2938
2
  if (is_subset < 0)
2939
0
    return isl_schedule_tree_free(tree);
2940
2
  if (!is_subset)
2941
2
    
isl_die0
(isl_schedule_tree_get_ctx(tree), isl_error_internal,
2942
2
      "grouped domain should be part of outer domain",
2943
2
      return isl_schedule_tree_free(tree));
2944
2
  domain = isl_schedule_tree_domain_get_domain(tree);
2945
2
  domain = isl_union_set_subtract(domain,
2946
2
        isl_union_set_copy(data->domain));
2947
2
  domain = isl_union_set_union(domain, isl_union_set_copy(data->group));
2948
2
  tree = isl_schedule_tree_domain_set_domain(tree, domain);
2949
2
2950
2
  return tree;
2951
2
}
2952
2953
/* Update the expansion tree root "tree" to refer to the group instances
2954
 * in data->group rather than the original domain elements in data->domain.
2955
 * "pos" is the position in the original schedule tree where the modified
2956
 * "tree" will be attached.
2957
 *
2958
 * Let G_1 -> D_1 be the expansion of "tree" and G_2 -> D_2 the newly
2959
 * introduced expansion in a descendant of "tree".
2960
 * We first double-check that D_2 is a subset of D_1.
2961
 * Then we remove D_2 from the range of G_1 -> D_1 and add the mapping
2962
 * G_1 -> D_1 . D_2 -> G_2.
2963
 * Simmilarly, we restrict the domain of the contraction to the universe
2964
 * of the range of the updated expansion and add G_2 -> D_2 . D_1 -> G_1,
2965
 * attempting to remove the domain constraints of this additional part.
2966
 */
2967
static __isl_give isl_schedule_tree *group_expansion(
2968
  __isl_take isl_schedule_tree *tree, __isl_keep isl_schedule_node *pos,
2969
  struct isl_schedule_group_data *data)
2970
1
{
2971
1
  isl_union_set *domain;
2972
1
  isl_union_map *expansion, *umap;
2973
1
  isl_union_pw_multi_aff *contraction, *upma;
2974
1
  int is_subset;
2975
1
2976
1
  expansion = isl_schedule_tree_expansion_get_expansion(tree);
2977
1
  domain = isl_union_map_range(expansion);
2978
1
  is_subset = isl_union_set_is_subset(data->domain, domain);
2979
1
  isl_union_set_free(domain);
2980
1
  if (is_subset < 0)
2981
0
    return isl_schedule_tree_free(tree);
2982
1
  if (!is_subset)
2983
1
    
isl_die0
(isl_schedule_tree_get_ctx(tree), isl_error_internal,
2984
1
      "grouped domain should be part "
2985
1
      "of outer expansion domain",
2986
1
      return isl_schedule_tree_free(tree));
2987
1
  expansion = isl_schedule_tree_expansion_get_expansion(tree);
2988
1
  umap = isl_union_map_from_union_pw_multi_aff(
2989
1
      isl_union_pw_multi_aff_copy(data->contraction));
2990
1
  umap = isl_union_map_apply_range(expansion, umap);
2991
1
  expansion = isl_schedule_tree_expansion_get_expansion(tree);
2992
1
  expansion = isl_union_map_subtract_range(expansion,
2993
1
        isl_union_set_copy(data->domain));
2994
1
  expansion = isl_union_map_union(expansion, umap);
2995
1
  umap = isl_union_map_universe(isl_union_map_copy(expansion));
2996
1
  domain = isl_union_map_range(umap);
2997
1
  contraction = isl_schedule_tree_expansion_get_contraction(tree);
2998
1
  umap = isl_union_map_from_union_pw_multi_aff(contraction);
2999
1
  umap = isl_union_map_apply_range(isl_union_map_copy(data->expansion),
3000
1
          umap);
3001
1
  upma = isl_union_pw_multi_aff_from_union_map(umap);
3002
1
  contraction = isl_schedule_tree_expansion_get_contraction(tree);
3003
1
  contraction = isl_union_pw_multi_aff_intersect_domain(contraction,
3004
1
                domain);
3005
1
  domain = isl_union_pw_multi_aff_domain(
3006
1
        isl_union_pw_multi_aff_copy(upma));
3007
1
  upma = isl_union_pw_multi_aff_gist(upma, domain);
3008
1
  contraction = isl_union_pw_multi_aff_union_add(contraction, upma);
3009
1
  tree = isl_schedule_tree_expansion_set_contraction_and_expansion(tree,
3010
1
              contraction, expansion);
3011
1
3012
1
  return tree;
3013
1
}
3014
3015
/* Update the tree root "tree" to refer to the group instances
3016
 * in data->group rather than the original domain elements in data->domain.
3017
 * "pos" is the position in the original schedule tree where the modified
3018
 * "tree" will be attached.
3019
 *
3020
 * If we have come across a domain or expansion node before (data->finished
3021
 * is set), then we no longer need perform any modifications.
3022
 *
3023
 * If "tree" is a filter, then we add data->group_universe to the filter.
3024
 * We also remove data->domain_universe from the filter if all the domain
3025
 * elements in this universe that reach the filter node are part of
3026
 * the elements that are being grouped by data->expansion.
3027
 * If "tree" is a band, domain or expansion, then it is handled
3028
 * in a separate function.
3029
 */
3030
static __isl_give isl_schedule_tree *group_ancestor(
3031
  __isl_take isl_schedule_tree *tree, __isl_keep isl_schedule_node *pos,
3032
  void *user)
3033
9
{
3034
9
  struct isl_schedule_group_data *data = user;
3035
9
  isl_union_set *domain;
3036
9
  int is_covered;
3037
9
3038
9
  if (!tree || !pos)
3039
0
    return isl_schedule_tree_free(tree);
3040
9
3041
9
  if (data->finished)
3042
2
    return tree;
3043
7
3044
7
  switch (isl_schedule_tree_get_type(tree)) {
3045
7
  case isl_schedule_node_error:
3046
0
    return isl_schedule_tree_free(tree);
3047
7
  case isl_schedule_node_extension:
3048
0
    isl_die(isl_schedule_tree_get_ctx(tree), isl_error_unsupported,
3049
0
      "grouping not allowed in extended tree",
3050
0
      return isl_schedule_tree_free(tree));
3051
2
  case isl_schedule_node_band:
3052
2
    tree = group_band(tree, pos, data);
3053
2
    break;
3054
0
  case isl_schedule_node_context:
3055
0
    tree = group_context(tree, pos, data);
3056
0
    break;
3057
2
  case isl_schedule_node_domain:
3058
2
    tree = group_domain(tree, pos, data);
3059
2
    data->finished = 1;
3060
2
    break;
3061
1
  case isl_schedule_node_filter:
3062
1
    domain = isl_schedule_node_get_domain(pos);
3063
1
    is_covered = locally_covered_by_domain(domain, data);
3064
1
    isl_union_set_free(domain);
3065
1
    if (is_covered < 0)
3066
0
      return isl_schedule_tree_free(tree);
3067
1
    domain = isl_schedule_tree_filter_get_filter(tree);
3068
1
    if (is_covered)
3069
1
      domain = isl_union_set_subtract(domain,
3070
1
            isl_union_set_copy(data->domain_universe));
3071
1
    domain = isl_union_set_union(domain,
3072
1
            isl_union_set_copy(data->group_universe));
3073
1
    tree = isl_schedule_tree_filter_set_filter(tree, domain);
3074
1
    break;
3075
1
  case isl_schedule_node_expansion:
3076
1
    tree = group_expansion(tree, pos, data);
3077
1
    data->finished = 1;
3078
1
    break;
3079
1
  case isl_schedule_node_leaf:
3080
1
  case isl_schedule_node_guard:
3081
1
  case isl_schedule_node_mark:
3082
1
  case isl_schedule_node_sequence:
3083
1
  case isl_schedule_node_set:
3084
1
    break;
3085
7
  }
3086
7
3087
7
  return tree;
3088
7
}
3089
3090
/* Group the domain elements that reach "node" into instances
3091
 * of a single statement with identifier "group_id".
3092
 * In particular, group the domain elements according to their
3093
 * prefix schedule.
3094
 *
3095
 * That is, introduce an expansion node with as contraction
3096
 * the prefix schedule (with the target space replaced by "group_id")
3097
 * and as expansion the inverse of this contraction (with its range
3098
 * intersected with the domain elements that reach "node").
3099
 * The outer nodes are then modified to refer to the group instances
3100
 * instead of the original domain elements.
3101
 *
3102
 * No instance of "group_id" is allowed to reach "node" prior
3103
 * to the grouping.
3104
 * No ancestor of "node" is allowed to be an extension node.
3105
 *
3106
 * Return a pointer to original node in tree, i.e., the child
3107
 * of the newly introduced expansion node.
3108
 */
3109
__isl_give isl_schedule_node *isl_schedule_node_group(
3110
  __isl_take isl_schedule_node *node, __isl_take isl_id *group_id)
3111
3
{
3112
3
  struct isl_schedule_group_data data = { 0 };
3113
3
  isl_space *space;
3114
3
  isl_union_set *domain;
3115
3
  isl_union_pw_multi_aff *contraction;
3116
3
  isl_union_map *expansion;
3117
3
  int disjoint;
3118
3
3119
3
  if (!node || !group_id)
3120
0
    goto error;
3121
3
  if (check_insert(node) < 0)
3122
0
    goto error;
3123
3
3124
3
  domain = isl_schedule_node_get_domain(node);
3125
3
  data.domain = isl_union_set_copy(domain);
3126
3
  data.domain_universe = isl_union_set_copy(domain);
3127
3
  data.domain_universe = isl_union_set_universe(data.domain_universe);
3128
3
3129
3
  data.dim = isl_schedule_node_get_schedule_depth(node);
3130
3
  if (data.dim == 0) {
3131
0
    isl_ctx *ctx;
3132
0
    isl_set *set;
3133
0
    isl_union_set *group;
3134
0
    isl_union_map *univ;
3135
0
3136
0
    ctx = isl_schedule_node_get_ctx(node);
3137
0
    space = isl_space_set_alloc(ctx, 0, 0);
3138
0
    space = isl_space_set_tuple_id(space, isl_dim_set, group_id);
3139
0
    set = isl_set_universe(isl_space_copy(space));
3140
0
    group = isl_union_set_from_set(set);
3141
0
    expansion = isl_union_map_from_domain_and_range(domain, group);
3142
0
    univ = isl_union_map_universe(isl_union_map_copy(expansion));
3143
0
    contraction = isl_union_pw_multi_aff_from_union_map(univ);
3144
0
    expansion = isl_union_map_reverse(expansion);
3145
3
  } else {
3146
3
    isl_multi_union_pw_aff *prefix;
3147
3
    isl_union_set *univ;
3148
3
3149
3
    prefix =
3150
3
    isl_schedule_node_get_prefix_schedule_multi_union_pw_aff(node);
3151
3
    prefix = isl_multi_union_pw_aff_set_tuple_id(prefix,
3152
3
              isl_dim_set, group_id);
3153
3
    space = isl_multi_union_pw_aff_get_space(prefix);
3154
3
    contraction = isl_union_pw_multi_aff_from_multi_union_pw_aff(
3155
3
              prefix);
3156
3
    univ = isl_union_set_universe(isl_union_set_copy(domain));
3157
3
    contraction =
3158
3
        isl_union_pw_multi_aff_intersect_domain(contraction, univ);
3159
3
    expansion = isl_union_map_from_union_pw_multi_aff(
3160
3
            isl_union_pw_multi_aff_copy(contraction));
3161
3
    expansion = isl_union_map_reverse(expansion);
3162
3
    expansion = isl_union_map_intersect_range(expansion, domain);
3163
3
  }
3164
3
  space = isl_space_map_from_set(space);
3165
3
  data.sched = isl_multi_aff_identity(space);
3166
3
  data.group = isl_union_map_domain(isl_union_map_copy(expansion));
3167
3
  data.group = isl_union_set_coalesce(data.group);
3168
3
  data.group_universe = isl_union_set_copy(data.group);
3169
3
  data.group_universe = isl_union_set_universe(data.group_universe);
3170
3
  data.expansion = isl_union_map_copy(expansion);
3171
3
  data.contraction = isl_union_pw_multi_aff_copy(contraction);
3172
3
  node = isl_schedule_node_insert_expansion(node, contraction, expansion);
3173
3
3174
3
  disjoint = isl_union_set_is_disjoint(data.domain_universe,
3175
3
              data.group_universe);
3176
3
3177
3
  node = update_ancestors(node, &group_ancestor, &data);
3178
3
3179
3
  isl_union_set_free(data.domain);
3180
3
  isl_union_set_free(data.domain_universe);
3181
3
  isl_union_set_free(data.group);
3182
3
  isl_union_set_free(data.group_universe);
3183
3
  isl_multi_aff_free(data.sched);
3184
3
  isl_union_map_free(data.expansion);
3185
3
  isl_union_pw_multi_aff_free(data.contraction);
3186
3
3187
3
  node = isl_schedule_node_child(node, 0);
3188
3
3189
3
  if (!node || disjoint < 0)
3190
0
    return isl_schedule_node_free(node);
3191
3
  if (!disjoint)
3192
3
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
3193
3
      "group instances already reach node",
3194
3
      return isl_schedule_node_free(node));
3195
3
3196
3
  return node;
3197
0
error:
3198
0
  isl_schedule_node_free(node);
3199
0
  isl_id_free(group_id);
3200
0
  return NULL;
3201
3
}
3202
3203
/* Compute the gist of the given band node with respect to "context".
3204
 */
3205
__isl_give isl_schedule_node *isl_schedule_node_band_gist(
3206
  __isl_take isl_schedule_node *node, __isl_take isl_union_set *context)
3207
307
{
3208
307
  isl_schedule_tree *tree;
3209
307
3210
307
  tree = isl_schedule_node_get_tree(node);
3211
307
  tree = isl_schedule_tree_band_gist(tree, context);
3212
307
  return isl_schedule_node_graft_tree(node, tree);
3213
307
}
3214
3215
/* Internal data structure for isl_schedule_node_gist.
3216
 * "n_expansion" is the number of outer expansion nodes
3217
 * with respect to the current position
3218
 * "filters" contains an element for each outer filter, expansion or
3219
 * extension node with respect to the current position, each representing
3220
 * the intersection of the previous element and the filter on the filter node
3221
 * or the expansion/extension of the previous element.
3222
 * The first element in the original context passed to isl_schedule_node_gist.
3223
 */
3224
struct isl_node_gist_data {
3225
  int n_expansion;
3226
  isl_union_set_list *filters;
3227
};
3228
3229
/* Enter the expansion node "node" during a isl_schedule_node_gist traversal.
3230
 *
3231
 * In particular, add an extra element to data->filters containing
3232
 * the expansion of the previous element and replace the expansion
3233
 * and contraction on "node" by the gist with respect to these filters.
3234
 * Also keep track of the fact that we have entered another expansion.
3235
 */
3236
static __isl_give isl_schedule_node *gist_enter_expansion(
3237
  __isl_take isl_schedule_node *node, struct isl_node_gist_data *data)
3238
0
{
3239
0
  int n;
3240
0
  isl_union_set *inner;
3241
0
  isl_union_map *expansion;
3242
0
  isl_union_pw_multi_aff *contraction;
3243
0
3244
0
  data->n_expansion++;
3245
0
3246
0
  n = isl_union_set_list_n_union_set(data->filters);
3247
0
  inner = isl_union_set_list_get_union_set(data->filters, n - 1);
3248
0
  expansion = isl_schedule_node_expansion_get_expansion(node);
3249
0
  inner = isl_union_set_apply(inner, expansion);
3250
0
3251
0
  contraction = isl_schedule_node_expansion_get_contraction(node);
3252
0
  contraction = isl_union_pw_multi_aff_gist(contraction,
3253
0
            isl_union_set_copy(inner));
3254
0
3255
0
  data->filters = isl_union_set_list_add(data->filters, inner);
3256
0
3257
0
  inner = isl_union_set_list_get_union_set(data->filters, n - 1);
3258
0
  expansion = isl_schedule_node_expansion_get_expansion(node);
3259
0
  expansion = isl_union_map_gist_domain(expansion, inner);
3260
0
  node = isl_schedule_node_expansion_set_contraction_and_expansion(node,
3261
0
            contraction, expansion);
3262
0
3263
0
  return node;
3264
0
}
3265
3266
/* Leave the expansion node "node" during a isl_schedule_node_gist traversal.
3267
 *
3268
 * In particular, remove the element in data->filters that was added by
3269
 * gist_enter_expansion and decrement the number of outer expansions.
3270
 *
3271
 * The expansion has already been simplified in gist_enter_expansion.
3272
 * If this simplification results in an identity expansion, then
3273
 * it is removed here.
3274
 */
3275
static __isl_give isl_schedule_node *gist_leave_expansion(
3276
  __isl_take isl_schedule_node *node, struct isl_node_gist_data *data)
3277
0
{
3278
0
  int n;
3279
0
  isl_bool identity;
3280
0
  isl_union_map *expansion;
3281
0
3282
0
  expansion = isl_schedule_node_expansion_get_expansion(node);
3283
0
  identity = isl_union_map_is_identity(expansion);
3284
0
  isl_union_map_free(expansion);
3285
0
3286
0
  if (identity < 0)
3287
0
    node = isl_schedule_node_free(node);
3288
0
  else if (identity)
3289
0
    node = isl_schedule_node_delete(node);
3290
0
3291
0
  n = isl_union_set_list_n_union_set(data->filters);
3292
0
  data->filters = isl_union_set_list_drop(data->filters, n - 1, 1);
3293
0
3294
0
  data->n_expansion--;
3295
0
3296
0
  return node;
3297
0
}
3298
3299
/* Enter the extension node "node" during a isl_schedule_node_gist traversal.
3300
 *
3301
 * In particular, add an extra element to data->filters containing
3302
 * the union of the previous element with the additional domain elements
3303
 * introduced by the extension.
3304
 */
3305
static __isl_give isl_schedule_node *gist_enter_extension(
3306
  __isl_take isl_schedule_node *node, struct isl_node_gist_data *data)
3307
0
{
3308
0
  int n;
3309
0
  isl_union_set *inner, *extra;
3310
0
  isl_union_map *extension;
3311
0
3312
0
  n = isl_union_set_list_n_union_set(data->filters);
3313
0
  inner = isl_union_set_list_get_union_set(data->filters, n - 1);
3314
0
  extension = isl_schedule_node_extension_get_extension(node);
3315
0
  extra = isl_union_map_range(extension);
3316
0
  inner = isl_union_set_union(inner, extra);
3317
0
3318
0
  data->filters = isl_union_set_list_add(data->filters, inner);
3319
0
3320
0
  return node;
3321
0
}
3322
3323
/* Can we finish gisting at this node?
3324
 * That is, is the filter on the current filter node a subset of
3325
 * the original context passed to isl_schedule_node_gist?
3326
 * If we have gone through any expansions, then we cannot perform
3327
 * this test since the current domain elements are incomparable
3328
 * to the domain elements in the original context.
3329
 */
3330
static int gist_done(__isl_keep isl_schedule_node *node,
3331
  struct isl_node_gist_data *data)
3332
1.26k
{
3333
1.26k
  isl_union_set *filter, *outer;
3334
1.26k
  int subset;
3335
1.26k
3336
1.26k
  if (data->n_expansion != 0)
3337
0
    return 0;
3338
1.26k
3339
1.26k
  filter = isl_schedule_node_filter_get_filter(node);
3340
1.26k
  outer = isl_union_set_list_get_union_set(data->filters, 0);
3341
1.26k
  subset = isl_union_set_is_subset(filter, outer);
3342
1.26k
  isl_union_set_free(outer);
3343
1.26k
  isl_union_set_free(filter);
3344
1.26k
3345
1.26k
  return subset;
3346
1.26k
}
3347
3348
/* Callback for "traverse" to enter a node and to move
3349
 * to the deepest initial subtree that should be traversed
3350
 * by isl_schedule_node_gist.
3351
 *
3352
 * The "filters" list is extended by one element each time
3353
 * we come across a filter node by the result of intersecting
3354
 * the last element in the list with the filter on the filter node.
3355
 *
3356
 * If the filter on the current filter node is a subset of
3357
 * the original context passed to isl_schedule_node_gist,
3358
 * then there is no need to go into its subtree since it cannot
3359
 * be further simplified by the context.  The "filters" list is
3360
 * still extended for consistency, but the actual value of the
3361
 * added element is immaterial since it will not be used.
3362
 *
3363
 * Otherwise, the filter on the current filter node is replaced by
3364
 * the gist of the original filter with respect to the intersection
3365
 * of the original context with the intermediate filters.
3366
 *
3367
 * If the new element in the "filters" list is empty, then no elements
3368
 * can reach the descendants of the current filter node.  The subtree
3369
 * underneath the filter node is therefore removed.
3370
 *
3371
 * Each expansion node we come across is handled by
3372
 * gist_enter_expansion.
3373
 *
3374
 * Each extension node we come across is handled by
3375
 * gist_enter_extension.
3376
 */
3377
static __isl_give isl_schedule_node *gist_enter(
3378
  __isl_take isl_schedule_node *node, void *user)
3379
1.36k
{
3380
1.36k
  struct isl_node_gist_data *data = user;
3381
1.36k
3382
2.68k
  do {
3383
2.68k
    isl_union_set *filter, *inner;
3384
2.68k
    int done, empty;
3385
2.68k
    int n;
3386
2.68k
3387
2.68k
    switch (isl_schedule_node_get_type(node)) {
3388
2.68k
    case isl_schedule_node_error:
3389
0
      return isl_schedule_node_free(node);
3390
2.68k
    case isl_schedule_node_expansion:
3391
0
      node = gist_enter_expansion(node, data);
3392
0
      continue;
3393
2.68k
    case isl_schedule_node_extension:
3394
0
      node = gist_enter_extension(node, data);
3395
0
      continue;
3396
2.68k
    case isl_schedule_node_band:
3397
1.42k
    case isl_schedule_node_context:
3398
1.42k
    case isl_schedule_node_domain:
3399
1.42k
    case isl_schedule_node_guard:
3400
1.42k
    case isl_schedule_node_leaf:
3401
1.42k
    case isl_schedule_node_mark:
3402
1.42k
    case isl_schedule_node_sequence:
3403
1.42k
    case isl_schedule_node_set:
3404
1.42k
      continue;
3405
1.42k
    case isl_schedule_node_filter:
3406
1.26k
      break;
3407
1.26k
    }
3408
1.26k
    done = gist_done(node, data);
3409
1.26k
    filter = isl_schedule_node_filter_get_filter(node);
3410
1.26k
    if (done < 0 || done) {
3411
117
      data->filters = isl_union_set_list_add(data->filters,
3412
117
                filter);
3413
117
      if (done < 0)
3414
0
        return isl_schedule_node_free(node);
3415
117
      return node;
3416
117
    }
3417
1.14k
    n = isl_union_set_list_n_union_set(data->filters);
3418
1.14k
    inner = isl_union_set_list_get_union_set(data->filters, n - 1);
3419
1.14k
    filter = isl_union_set_gist(filter, isl_union_set_copy(inner));
3420
1.14k
    node = isl_schedule_node_filter_set_filter(node,
3421
1.14k
            isl_union_set_copy(filter));
3422
1.14k
    filter = isl_union_set_intersect(filter, inner);
3423
1.14k
    empty = isl_union_set_is_empty(filter);
3424
1.14k
    data->filters = isl_union_set_list_add(data->filters, filter);
3425
1.14k
    if (empty < 0)
3426
0
      return isl_schedule_node_free(node);
3427
1.14k
    if (!empty)
3428
697
      continue;
3429
452
    node = isl_schedule_node_child(node, 0);
3430
452
    node = isl_schedule_node_cut(node);
3431
452
    node = isl_schedule_node_parent(node);
3432
452
    return node;
3433
2.11k
  } while (isl_schedule_node_has_children(node) &&
3434
2.11k
    
(node = isl_schedule_node_first_child(node)) != NULL1.32k
);
3435
1.36k
3436
1.36k
  
return node791
;
3437
1.36k
}
3438
3439
/* Callback for "traverse" to leave a node for isl_schedule_node_gist.
3440
 *
3441
 * In particular, if the current node is a filter node, then we remove
3442
 * the element on the "filters" list that was added when we entered
3443
 * the node.  There is no need to compute any gist here, since we
3444
 * already did that when we entered the node.
3445
 *
3446
 * Expansion nodes are handled by gist_leave_expansion.
3447
 *
3448
 * If the current node is an extension, then remove the element
3449
 * in data->filters that was added by gist_enter_extension.
3450
 *
3451
 * If the current node is a band node, then we compute the gist of
3452
 * the band node with respect to the intersection of the original context
3453
 * and the intermediate filters.
3454
 *
3455
 * If the current node is a sequence or set node, then some of
3456
 * the filter children may have become empty and so they are removed.
3457
 * If only one child is left, then the set or sequence node along with
3458
 * the single remaining child filter is removed.  The filter can be
3459
 * removed because the filters on a sequence or set node are supposed
3460
 * to partition the incoming domain instances.
3461
 * In principle, it should then be impossible for there to be zero
3462
 * remaining children, but should this happen, we replace the entire
3463
 * subtree with an empty filter.
3464
 */
3465
static __isl_give isl_schedule_node *gist_leave(
3466
  __isl_take isl_schedule_node *node, void *user)
3467
2.68k
{
3468
2.68k
  struct isl_node_gist_data *data = user;
3469
2.68k
  isl_schedule_tree *tree;
3470
2.68k
  int i, n;
3471
2.68k
  isl_union_set *filter;
3472
2.68k
3473
2.68k
  switch (isl_schedule_node_get_type(node)) {
3474
2.68k
  case isl_schedule_node_error:
3475
0
    return isl_schedule_node_free(node);
3476
2.68k
  case isl_schedule_node_expansion:
3477
0
    node = gist_leave_expansion(node, data);
3478
0
    break;
3479
2.68k
  case isl_schedule_node_extension:
3480
1.26k
  case isl_schedule_node_filter:
3481
1.26k
    n = isl_union_set_list_n_union_set(data->filters);
3482
1.26k
    data->filters = isl_union_set_list_drop(data->filters,
3483
1.26k
              n - 1, 1);
3484
1.26k
    break;
3485
1.26k
  case isl_schedule_node_band:
3486
307
    n = isl_union_set_list_n_union_set(data->filters);
3487
307
    filter = isl_union_set_list_get_union_set(data->filters, n - 1);
3488
307
    node = isl_schedule_node_band_gist(node, filter);
3489
307
    break;
3490
1.26k
  case isl_schedule_node_set:
3491
324
  case isl_schedule_node_sequence:
3492
324
    tree = isl_schedule_node_get_tree(node);
3493
324
    n = isl_schedule_tree_n_children(tree);
3494
1.59k
    for (i = n - 1; i >= 0; 
--i1.26k
) {
3495
1.26k
      isl_schedule_tree *child;
3496
1.26k
      isl_union_set *filter;
3497
1.26k
      int empty;
3498
1.26k
3499
1.26k
      child = isl_schedule_tree_get_child(tree, i);
3500
1.26k
      filter = isl_schedule_tree_filter_get_filter(child);
3501
1.26k
      empty = isl_union_set_is_empty(filter);
3502
1.26k
      isl_union_set_free(filter);
3503
1.26k
      isl_schedule_tree_free(child);
3504
1.26k
      if (empty < 0)
3505
0
        tree = isl_schedule_tree_free(tree);
3506
1.26k
      else if (empty)
3507
452
        tree = isl_schedule_tree_drop_child(tree, i);
3508
1.26k
    }
3509
324
    n = isl_schedule_tree_n_children(tree);
3510
324
    node = isl_schedule_node_graft_tree(node, tree);
3511
324
    if (n == 1) {
3512
176
      node = isl_schedule_node_delete(node);
3513
176
      node = isl_schedule_node_delete(node);
3514
176
    } else 
if (148
n == 0148
) {
3515
0
      isl_space *space;
3516
0
3517
0
      filter =
3518
0
          isl_union_set_list_get_union_set(data->filters, 0);
3519
0
      space = isl_union_set_get_space(filter);
3520
0
      isl_union_set_free(filter);
3521
0
      filter = isl_union_set_empty(space);
3522
0
      node = isl_schedule_node_cut(node);
3523
0
      node = isl_schedule_node_insert_filter(node, filter);
3524
0
    }
3525
324
    break;
3526
791
  case isl_schedule_node_context:
3527
791
  case isl_schedule_node_domain:
3528
791
  case isl_schedule_node_guard:
3529
791
  case isl_schedule_node_leaf:
3530
791
  case isl_schedule_node_mark:
3531
791
    break;
3532
2.68k
  }
3533
2.68k
3534
2.68k
  return node;
3535
2.68k
}
3536
3537
/* Compute the gist of the subtree at "node" with respect to
3538
 * the reaching domain elements in "context".
3539
 * In particular, compute the gist of all band and filter nodes
3540
 * in the subtree with respect to "context".  Children of set or sequence
3541
 * nodes that end up with an empty filter are removed completely.
3542
 *
3543
 * We keep track of the intersection of "context" with all outer filters
3544
 * of the current node within the subtree in the final element of "filters".
3545
 * Initially, this list contains the single element "context" and it is
3546
 * extended or shortened each time we enter or leave a filter node.
3547
 */
3548
__isl_give isl_schedule_node *isl_schedule_node_gist(
3549
  __isl_take isl_schedule_node *node, __isl_take isl_union_set *context)
3550
418
{
3551
418
  struct isl_node_gist_data data;
3552
418
3553
418
  data.n_expansion = 0;
3554
418
  data.filters = isl_union_set_list_from_union_set(context);
3555
418
  node = traverse(node, &gist_enter, &gist_leave, &data);
3556
418
  isl_union_set_list_free(data.filters);
3557
418
  return node;
3558
418
}
3559
3560
/* Intersect the domain of domain node "node" with "domain".
3561
 *
3562
 * If the domain of "node" is already a subset of "domain",
3563
 * then nothing needs to be changed.
3564
 *
3565
 * Otherwise, we replace the domain of the domain node by the intersection
3566
 * and simplify the subtree rooted at "node" with respect to this intersection.
3567
 */
3568
__isl_give isl_schedule_node *isl_schedule_node_domain_intersect_domain(
3569
  __isl_take isl_schedule_node *node, __isl_take isl_union_set *domain)
3570
3.10k
{
3571
3.10k
  isl_schedule_tree *tree;
3572
3.10k
  isl_union_set *uset;
3573
3.10k
  int is_subset;
3574
3.10k
3575
3.10k
  if (!node || !domain)
3576
0
    goto error;
3577
3.10k
3578
3.10k
  uset = isl_schedule_tree_domain_get_domain(node->tree);
3579
3.10k
  is_subset = isl_union_set_is_subset(uset, domain);
3580
3.10k
  isl_union_set_free(uset);
3581
3.10k
  if (is_subset < 0)
3582
0
    goto error;
3583
3.10k
  if (is_subset) {
3584
2.84k
    isl_union_set_free(domain);
3585
2.84k
    return node;
3586
2.84k
  }
3587
260
3588
260
  tree = isl_schedule_tree_copy(node->tree);
3589
260
  uset = isl_schedule_tree_domain_get_domain(tree);
3590
260
  uset = isl_union_set_intersect(uset, domain);
3591
260
  tree = isl_schedule_tree_domain_set_domain(tree,
3592
260
                isl_union_set_copy(uset));
3593
260
  node = isl_schedule_node_graft_tree(node, tree);
3594
260
3595
260
  node = isl_schedule_node_child(node, 0);
3596
260
  node = isl_schedule_node_gist(node, uset);
3597
260
  node = isl_schedule_node_parent(node);
3598
260
3599
260
  return node;
3600
0
error:
3601
0
  isl_schedule_node_free(node);
3602
0
  isl_union_set_free(domain);
3603
0
  return NULL;
3604
260
}
3605
3606
/* Replace the domain of domain node "node" with the gist
3607
 * of the original domain with respect to the parameter domain "context".
3608
 */
3609
__isl_give isl_schedule_node *isl_schedule_node_domain_gist_params(
3610
  __isl_take isl_schedule_node *node, __isl_take isl_set *context)
3611
1.14k
{
3612
1.14k
  isl_union_set *domain;
3613
1.14k
  isl_schedule_tree *tree;
3614
1.14k
3615
1.14k
  if (!node || !context)
3616
0
    goto error;
3617
1.14k
3618
1.14k
  tree = isl_schedule_tree_copy(node->tree);
3619
1.14k
  domain = isl_schedule_tree_domain_get_domain(node->tree);
3620
1.14k
  domain = isl_union_set_gist_params(domain, context);
3621
1.14k
  tree = isl_schedule_tree_domain_set_domain(tree, domain);
3622
1.14k
  node = isl_schedule_node_graft_tree(node, tree);
3623
1.14k
3624
1.14k
  return node;
3625
0
error:
3626
0
  isl_schedule_node_free(node);
3627
0
  isl_set_free(context);
3628
0
  return NULL;
3629
1.14k
}
3630
3631
/* Internal data structure for isl_schedule_node_get_subtree_expansion.
3632
 * "expansions" contains a list of accumulated expansions
3633
 * for each outer expansion, set or sequence node.  The first element
3634
 * in the list is an identity mapping on the reaching domain elements.
3635
 * "res" collects the results.
3636
 */
3637
struct isl_subtree_expansion_data {
3638
  isl_union_map_list *expansions;
3639
  isl_union_map *res;
3640
};
3641
3642
/* Callback for "traverse" to enter a node and to move
3643
 * to the deepest initial subtree that should be traversed
3644
 * by isl_schedule_node_get_subtree_expansion.
3645
 *
3646
 * Whenever we come across an expansion node, the last element
3647
 * of data->expansions is combined with the expansion
3648
 * on the expansion node.
3649
 *
3650
 * Whenever we come across a filter node that is the child
3651
 * of a set or sequence node, data->expansions is extended
3652
 * with a new element that restricts the previous element
3653
 * to the elements selected by the filter.
3654
 * The previous element can then be reused while backtracking.
3655
 */
3656
static __isl_give isl_schedule_node *subtree_expansion_enter(
3657
  __isl_take isl_schedule_node *node, void *user)
3658
3
{
3659
3
  struct isl_subtree_expansion_data *data = user;
3660
3
3661
12
  do {
3662
12
    enum isl_schedule_node_type type;
3663
12
    isl_union_set *filter;
3664
12
    isl_union_map *inner, *expansion;
3665
12
    int n;
3666
12
3667
12
    switch (isl_schedule_node_get_type(node)) {
3668
12
    case isl_schedule_node_error:
3669
0
      return isl_schedule_node_free(node);
3670
12
    case isl_schedule_node_filter:
3671
4
      type = isl_schedule_node_get_parent_type(node);
3672
4
      if (type != isl_schedule_node_set &&
3673
4
          type != isl_schedule_node_sequence)
3674
0
        break;
3675
4
      filter = isl_schedule_node_filter_get_filter(node);
3676
4
      n = isl_union_map_list_n_union_map(data->expansions);
3677
4
      inner =
3678
4
          isl_union_map_list_get_union_map(data->expansions,
3679
4
                n - 1);
3680
4
      inner = isl_union_map_intersect_range(inner, filter);
3681
4
      data->expansions =
3682
4
          isl_union_map_list_add(data->expansions, inner);
3683
4
      break;
3684
4
    case isl_schedule_node_expansion:
3685
2
      n = isl_union_map_list_n_union_map(data->expansions);
3686
2
      expansion =
3687
2
        isl_schedule_node_expansion_get_expansion(node);
3688
2
      inner =
3689
2
          isl_union_map_list_get_union_map(data->expansions,
3690
2
                n - 1);
3691
2
      inner = isl_union_map_apply_range(inner, expansion);
3692
2
      data->expansions =
3693
2
          isl_union_map_list_set_union_map(data->expansions,
3694
2
                n - 1, inner);
3695
2
      break;
3696
6
    case isl_schedule_node_band:
3697
6
    case isl_schedule_node_context:
3698
6
    case isl_schedule_node_domain:
3699
6
    case isl_schedule_node_extension:
3700
6
    case isl_schedule_node_guard:
3701
6
    case isl_schedule_node_leaf:
3702
6
    case isl_schedule_node_mark:
3703
6
    case isl_schedule_node_sequence:
3704
6
    case isl_schedule_node_set:
3705
6
      break;
3706
12
    }
3707
12
  } while (isl_schedule_node_has_children(node) &&
3708
12
    
(node = isl_schedule_node_first_child(node)) != NULL9
);
3709
3
3710
3
  return node;
3711
3
}
3712
3713
/* Callback for "traverse" to leave a node for
3714
 * isl_schedule_node_get_subtree_expansion.
3715
 *
3716
 * If we come across a filter node that is the child
3717
 * of a set or sequence node, then we remove the element
3718
 * of data->expansions that was added in subtree_expansion_enter.
3719
 *
3720
 * If we reach a leaf node, then the accumulated expansion is
3721
 * added to data->res.
3722
 */
3723
static __isl_give isl_schedule_node *subtree_expansion_leave(
3724
  __isl_take isl_schedule_node *node, void *user)
3725
12
{
3726
12
  struct isl_subtree_expansion_data *data = user;
3727
12
  int n;
3728
12
  isl_union_map *inner;
3729
12
  enum isl_schedule_node_type type;
3730
12
3731
12
  switch (isl_schedule_node_get_type(node)) {
3732
12
  case isl_schedule_node_error:
3733
0
    return isl_schedule_node_free(node);
3734
12
  case isl_schedule_node_filter:
3735
4
    type = isl_schedule_node_get_parent_type(node);
3736
4
    if (type != isl_schedule_node_set &&
3737
4
        type != isl_schedule_node_sequence)
3738
0
      break;
3739
4
    n = isl_union_map_list_n_union_map(data->expansions);
3740
4
    data->expansions = isl_union_map_list_drop(data->expansions,
3741
4
              n - 1, 1);
3742
4
    break;
3743
4
  case isl_schedule_node_leaf:
3744
3
    n = isl_union_map_list_n_union_map(data->expansions);
3745
3
    inner = isl_union_map_list_get_union_map(data->expansions,
3746
3
              n - 1);
3747
3
    data->res = isl_union_map_union(data->res, inner);
3748
3
    break;
3749
5
  case isl_schedule_node_band:
3750
5
  case isl_schedule_node_context:
3751
5
  case isl_schedule_node_domain:
3752
5
  case isl_schedule_node_expansion:
3753
5
  case isl_schedule_node_extension:
3754
5
  case isl_schedule_node_guard:
3755
5
  case isl_schedule_node_mark:
3756
5
  case isl_schedule_node_sequence:
3757
5
  case isl_schedule_node_set:
3758
5
    break;
3759
12
  }
3760
12
3761
12
  return node;
3762
12
}
3763
3764
/* Return a mapping from the domain elements that reach "node"
3765
 * to the corresponding domain elements in the leaves of the subtree
3766
 * rooted at "node" obtained by composing the intermediate expansions.
3767
 *
3768
 * We start out with an identity mapping between the domain elements
3769
 * that reach "node" and compose it with all the expansions
3770
 * on a path from "node" to a leaf while traversing the subtree.
3771
 * Within the children of an a sequence or set node, the
3772
 * accumulated expansion is restricted to the elements selected
3773
 * by the filter child.
3774
 */
3775
__isl_give isl_union_map *isl_schedule_node_get_subtree_expansion(
3776
  __isl_keep isl_schedule_node *node)
3777
1
{
3778
1
  struct isl_subtree_expansion_data data;
3779
1
  isl_space *space;
3780
1
  isl_union_set *domain;
3781
1
  isl_union_map *expansion;
3782
1
3783
1
  if (!node)
3784
0
    return NULL;
3785
1
3786
1
  domain = isl_schedule_node_get_universe_domain(node);
3787
1
  space = isl_union_set_get_space(domain);
3788
1
  expansion = isl_union_set_identity(domain);
3789
1
  data.res = isl_union_map_empty(space);
3790
1
  data.expansions = isl_union_map_list_from_union_map(expansion);
3791
1
3792
1
  node = isl_schedule_node_copy(node);
3793
1
  node = traverse(node, &subtree_expansion_enter,
3794
1
      &subtree_expansion_leave, &data);
3795
1
  if (!node)
3796
0
    data.res = isl_union_map_free(data.res);
3797
1
  isl_schedule_node_free(node);
3798
1
3799
1
  isl_union_map_list_free(data.expansions);
3800
1
3801
1
  return data.res;
3802
1
}
3803
3804
/* Internal data structure for isl_schedule_node_get_subtree_contraction.
3805
 * "contractions" contains a list of accumulated contractions
3806
 * for each outer expansion, set or sequence node.  The first element
3807
 * in the list is an identity mapping on the reaching domain elements.
3808
 * "res" collects the results.
3809
 */
3810
struct isl_subtree_contraction_data {
3811
  isl_union_pw_multi_aff_list *contractions;
3812
  isl_union_pw_multi_aff *res;
3813
};
3814
3815
/* Callback for "traverse" to enter a node and to move
3816
 * to the deepest initial subtree that should be traversed
3817
 * by isl_schedule_node_get_subtree_contraction.
3818
 *
3819
 * Whenever we come across an expansion node, the last element
3820
 * of data->contractions is combined with the contraction
3821
 * on the expansion node.
3822
 *
3823
 * Whenever we come across a filter node that is the child
3824
 * of a set or sequence node, data->contractions is extended
3825
 * with a new element that restricts the previous element
3826
 * to the elements selected by the filter.
3827
 * The previous element can then be reused while backtracking.
3828
 */
3829
static __isl_give isl_schedule_node *subtree_contraction_enter(
3830
  __isl_take isl_schedule_node *node, void *user)
3831
8
{
3832
8
  struct isl_subtree_contraction_data *data = user;
3833
8
3834
24
  do {
3835
24
    enum isl_schedule_node_type type;
3836
24
    isl_union_set *filter;
3837
24
    isl_union_pw_multi_aff *inner, *contraction;
3838
24
    int n;
3839
24
3840
24
    switch (isl_schedule_node_get_type(node)) {
3841
24
    case isl_schedule_node_error:
3842
0
      return isl_schedule_node_free(node);
3843
24
    case isl_schedule_node_filter:
3844
0
      type = isl_schedule_node_get_parent_type(node);
3845
0
      if (type != isl_schedule_node_set &&
3846
0
          type != isl_schedule_node_sequence)
3847
0
        break;
3848
0
      filter = isl_schedule_node_filter_get_filter(node);
3849
0
      n = isl_union_pw_multi_aff_list_n_union_pw_multi_aff(
3850
0
            data->contractions);
3851
0
      inner =
3852
0
          isl_union_pw_multi_aff_list_get_union_pw_multi_aff(
3853
0
            data->contractions, n - 1);
3854
0
      inner = isl_union_pw_multi_aff_intersect_domain(inner,
3855
0
                filter);
3856
0
      data->contractions =
3857
0
          isl_union_pw_multi_aff_list_add(data->contractions,
3858
0
                inner);
3859
0
      break;
3860
0
    case isl_schedule_node_expansion:
3861
0
      n = isl_union_pw_multi_aff_list_n_union_pw_multi_aff(
3862
0
            data->contractions);
3863
0
      contraction =
3864
0
          isl_schedule_node_expansion_get_contraction(node);
3865
0
      inner =
3866
0
          isl_union_pw_multi_aff_list_get_union_pw_multi_aff(
3867
0
            data->contractions, n - 1);
3868
0
      inner =
3869
0
          isl_union_pw_multi_aff_pullback_union_pw_multi_aff(
3870
0
            inner, contraction);
3871
0
      data->contractions =
3872
0
          isl_union_pw_multi_aff_list_set_union_pw_multi_aff(
3873
0
          data->contractions, n - 1, inner);
3874
0
      break;
3875
24
    case isl_schedule_node_band:
3876
24
    case isl_schedule_node_context:
3877
24
    case isl_schedule_node_domain:
3878
24
    case isl_schedule_node_extension:
3879
24
    case isl_schedule_node_guard:
3880
24
    case isl_schedule_node_leaf:
3881
24
    case isl_schedule_node_mark:
3882
24
    case isl_schedule_node_sequence:
3883
24
    case isl_schedule_node_set:
3884
24
      break;
3885
24
    }
3886
24
  } while (isl_schedule_node_has_children(node) &&
3887
24
    
(node = isl_schedule_node_first_child(node)) != NULL16
);
3888
8
3889
8
  return node;
3890
8
}
3891
3892
/* Callback for "traverse" to leave a node for
3893
 * isl_schedule_node_get_subtree_contraction.
3894
 *
3895
 * If we come across a filter node that is the child
3896
 * of a set or sequence node, then we remove the element
3897
 * of data->contractions that was added in subtree_contraction_enter.
3898
 *
3899
 * If we reach a leaf node, then the accumulated contraction is
3900
 * added to data->res.
3901
 */
3902
static __isl_give isl_schedule_node *subtree_contraction_leave(
3903
  __isl_take isl_schedule_node *node, void *user)
3904
24
{
3905
24
  struct isl_subtree_contraction_data *data = user;
3906
24
  int n;
3907
24
  isl_union_pw_multi_aff *inner;
3908
24
  enum isl_schedule_node_type type;
3909
24
3910
24
  switch (isl_schedule_node_get_type(node)) {
3911
24
  case isl_schedule_node_error:
3912
0
    return isl_schedule_node_free(node);
3913
24
  case isl_schedule_node_filter:
3914
0
    type = isl_schedule_node_get_parent_type(node);
3915
0
    if (type != isl_schedule_node_set &&
3916
0
        type != isl_schedule_node_sequence)
3917
0
      break;
3918
0
    n = isl_union_pw_multi_aff_list_n_union_pw_multi_aff(
3919
0
            data->contractions);
3920
0
    data->contractions =
3921
0
      isl_union_pw_multi_aff_list_drop(data->contractions,
3922
0
              n - 1, 1);
3923
0
    break;
3924
8
  case isl_schedule_node_leaf:
3925
8
    n = isl_union_pw_multi_aff_list_n_union_pw_multi_aff(
3926
8
            data->contractions);
3927
8
    inner = isl_union_pw_multi_aff_list_get_union_pw_multi_aff(
3928
8
            data->contractions, n - 1);
3929
8
    data->res = isl_union_pw_multi_aff_union_add(data->res, inner);
3930
8
    break;
3931
16
  case isl_schedule_node_band:
3932
16
  case isl_schedule_node_context:
3933
16
  case isl_schedule_node_domain:
3934
16
  case isl_schedule_node_expansion:
3935
16
  case isl_schedule_node_extension:
3936
16
  case isl_schedule_node_guard:
3937
16
  case isl_schedule_node_mark:
3938
16
  case isl_schedule_node_sequence:
3939
16
  case isl_schedule_node_set:
3940
16
    break;
3941
24
  }
3942
24
3943
24
  return node;
3944
24
}
3945
3946
/* Return a mapping from the domain elements in the leaves of the subtree
3947
 * rooted at "node" to the corresponding domain elements that reach "node"
3948
 * obtained by composing the intermediate contractions.
3949
 *
3950
 * We start out with an identity mapping between the domain elements
3951
 * that reach "node" and compose it with all the contractions
3952
 * on a path from "node" to a leaf while traversing the subtree.
3953
 * Within the children of an a sequence or set node, the
3954
 * accumulated contraction is restricted to the elements selected
3955
 * by the filter child.
3956
 */
3957
__isl_give isl_union_pw_multi_aff *isl_schedule_node_get_subtree_contraction(
3958
  __isl_keep isl_schedule_node *node)
3959
8
{
3960
8
  struct isl_subtree_contraction_data data;
3961
8
  isl_space *space;
3962
8
  isl_union_set *domain;
3963
8
  isl_union_pw_multi_aff *contraction;
3964
8
3965
8
  if (!node)
3966
0
    return NULL;
3967
8
3968
8
  domain = isl_schedule_node_get_universe_domain(node);
3969
8
  space = isl_union_set_get_space(domain);
3970
8
  contraction = isl_union_set_identity_union_pw_multi_aff(domain);
3971
8
  data.res = isl_union_pw_multi_aff_empty(space);
3972
8
  data.contractions =
3973
8
      isl_union_pw_multi_aff_list_from_union_pw_multi_aff(contraction);
3974
8
3975
8
  node = isl_schedule_node_copy(node);
3976
8
  node = traverse(node, &subtree_contraction_enter,
3977
8
      &subtree_contraction_leave, &data);
3978
8
  if (!node)
3979
0
    data.res = isl_union_pw_multi_aff_free(data.res);
3980
8
  isl_schedule_node_free(node);
3981
8
3982
8
  isl_union_pw_multi_aff_list_free(data.contractions);
3983
8
3984
8
  return data.res;
3985
8
}
3986
3987
/* Do the nearest "n" ancestors of "node" have the types given in "types"
3988
 * (starting at the parent of "node")?
3989
 */
3990
static int has_ancestors(__isl_keep isl_schedule_node *node,
3991
  int n, enum isl_schedule_node_type *types)
3992
24
{
3993
24
  int i, n_ancestor;
3994
24
3995
24
  if (!node)
3996
0
    return -1;
3997
24
3998
24
  n_ancestor = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
3999
24
  if (n_ancestor < n)
4000
0
    return 0;
4001
24
4002
24
  for (i = 0; i < n; 
++i0
) {
4003
24
    isl_schedule_tree *tree;
4004
24
    int correct_type;
4005
24
4006
24
    tree = isl_schedule_tree_list_get_schedule_tree(node->ancestors,
4007
24
                  n_ancestor - 1 - i);
4008
24
    if (!tree)
4009
0
      return -1;
4010
24
    correct_type = isl_schedule_tree_get_type(tree) == types[i];
4011
24
    isl_schedule_tree_free(tree);
4012
24
    if (!correct_type)
4013
24
      return 0;
4014
24
  }
4015
24
4016
24
  
return 10
;
4017
24
}
4018
4019
/* Given a node "node" that appears in an extension (i.e., it is the child
4020
 * of a filter in a sequence inside an extension node), are the spaces
4021
 * of the extension specified by "extension" disjoint from those
4022
 * of both the original extension and the domain elements that reach
4023
 * that original extension?
4024
 */
4025
static int is_disjoint_extension(__isl_keep isl_schedule_node *node,
4026
  __isl_keep isl_union_map *extension)
4027
0
{
4028
0
  isl_union_map *old;
4029
0
  isl_union_set *domain;
4030
0
  int empty;
4031
0
4032
0
  node = isl_schedule_node_copy(node);
4033
0
  node = isl_schedule_node_parent(node);
4034
0
  node = isl_schedule_node_parent(node);
4035
0
  node = isl_schedule_node_parent(node);
4036
0
  old = isl_schedule_node_extension_get_extension(node);
4037
0
  domain = isl_schedule_node_get_universe_domain(node);
4038
0
  isl_schedule_node_free(node);
4039
0
  old = isl_union_map_universe(old);
4040
0
  domain = isl_union_set_union(domain, isl_union_map_range(old));
4041
0
  extension = isl_union_map_copy(extension);
4042
0
  extension = isl_union_map_intersect_range(extension, domain);
4043
0
  empty = isl_union_map_is_empty(extension);
4044
0
  isl_union_map_free(extension);
4045
0
4046
0
  return empty;
4047
0
}
4048
4049
/* Given a node "node" that is governed by an extension node, extend
4050
 * that extension node with "extension".
4051
 *
4052
 * In particular, "node" is the child of a filter in a sequence that
4053
 * is in turn a child of an extension node.  Extend that extension node
4054
 * with "extension".
4055
 *
4056
 * Return a pointer to the parent of the original node (i.e., a filter).
4057
 */
4058
static __isl_give isl_schedule_node *extend_extension(
4059
  __isl_take isl_schedule_node *node, __isl_take isl_union_map *extension)
4060
0
{
4061
0
  int pos;
4062
0
  int disjoint;
4063
0
  isl_union_map *node_extension;
4064
0
4065
0
  node = isl_schedule_node_parent(node);
4066
0
  pos = isl_schedule_node_get_child_position(node);
4067
0
  node = isl_schedule_node_parent(node);
4068
0
  node = isl_schedule_node_parent(node);
4069
0
  node_extension = isl_schedule_node_extension_get_extension(node);
4070
0
  disjoint = isl_union_map_is_disjoint(extension, node_extension);
4071
0
  extension = isl_union_map_union(extension, node_extension);
4072
0
  node = isl_schedule_node_extension_set_extension(node, extension);
4073
0
  node = isl_schedule_node_child(node, 0);
4074
0
  node = isl_schedule_node_child(node, pos);
4075
0
4076
0
  if (disjoint < 0)
4077
0
    return isl_schedule_node_free(node);
4078
0
  if (!node)
4079
0
    return NULL;
4080
0
  if (!disjoint)
4081
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
4082
0
      "extension domain should be disjoint from earlier "
4083
0
      "extensions", return isl_schedule_node_free(node));
4084
0
4085
0
  return node;
4086
0
}
4087
4088
/* Return the universe of "uset" if this universe is disjoint from "ref".
4089
 * Otherwise, return "uset".
4090
 *
4091
 * Also check if "uset" itself is disjoint from "ref", reporting
4092
 * an error if it is not.
4093
 */
4094
static __isl_give isl_union_set *replace_by_universe_if_disjoint(
4095
  __isl_take isl_union_set *uset, __isl_keep isl_union_set *ref)
4096
48
{
4097
48
  int disjoint;
4098
48
  isl_union_set *universe;
4099
48
4100
48
  disjoint = isl_union_set_is_disjoint(uset, ref);
4101
48
  if (disjoint < 0)
4102
0
    return isl_union_set_free(uset);
4103
48
  if (!disjoint)
4104
48
    
isl_die0
(isl_union_set_get_ctx(uset), isl_error_invalid,
4105
48
      "extension domain should be disjoint from "
4106
48
      "current domain", return isl_union_set_free(uset));
4107
48
4108
48
  universe = isl_union_set_universe(isl_union_set_copy(uset));
4109
48
  disjoint = isl_union_set_is_disjoint(universe, ref);
4110
48
  if (disjoint >= 0 && disjoint) {
4111
48
    isl_union_set_free(uset);
4112
48
    return universe;
4113
48
  }
4114
0
  isl_union_set_free(universe);
4115
0
4116
0
  if (disjoint < 0)
4117
0
    return isl_union_set_free(uset);
4118
0
  return uset;
4119
0
}
4120
4121
/* Insert an extension node on top of "node" with extension "extension".
4122
 * In addition, insert a filter that separates node from the extension
4123
 * between the extension node and "node".
4124
 * Return a pointer to the inserted filter node.
4125
 *
4126
 * If "node" already appears in an extension (i.e., if it is the child
4127
 * of a filter in a sequence inside an extension node), then extend that
4128
 * extension with "extension" instead.
4129
 * In this case, a pointer to the original filter node is returned.
4130
 * Note that if some of the elements in the new extension live in the
4131
 * same space as those of the original extension or the domain elements
4132
 * reaching the original extension, then we insert a new extension anyway.
4133
 * Otherwise, we would have to adjust the filters in the sequence child
4134
 * of the extension to ensure that the elements in the new extension
4135
 * are filtered out.
4136
 */
4137
static __isl_give isl_schedule_node *insert_extension(
4138
  __isl_take isl_schedule_node *node, __isl_take isl_union_map *extension)
4139
24
{
4140
24
  enum isl_schedule_node_type ancestors[] =
4141
24
    { isl_schedule_node_filter, isl_schedule_node_sequence,
4142
24
      isl_schedule_node_extension };
4143
24
  isl_union_set *domain;
4144
24
  isl_union_set *filter;
4145
24
  int in_ext;
4146
24
4147
24
  in_ext = has_ancestors(node, 3, ancestors);
4148
24
  if (in_ext < 0)
4149
0
    goto error;
4150
24
  if (in_ext) {
4151
0
    int disjoint;
4152
0
4153
0
    disjoint = is_disjoint_extension(node, extension);
4154
0
    if (disjoint < 0)
4155
0
      goto error;
4156
0
    if (disjoint)
4157
0
      return extend_extension(node, extension);
4158
24
  }
4159
24
4160
24
  filter = isl_schedule_node_get_domain(node);
4161
24
  domain = isl_union_map_range(isl_union_map_copy(extension));
4162
24
  filter = replace_by_universe_if_disjoint(filter, domain);
4163
24
  isl_union_set_free(domain);
4164
24
4165
24
  node = isl_schedule_node_insert_filter(node, filter);
4166
24
  node = isl_schedule_node_insert_extension(node, extension);
4167
24
  node = isl_schedule_node_child(node, 0);
4168
24
  return node;
4169
0
error:
4170
0
  isl_schedule_node_free(node);
4171
0
  isl_union_map_free(extension);
4172
0
  return NULL;
4173
24
}
4174
4175
/* Replace the subtree that "node" points to by "tree" (which has
4176
 * a sequence root with two children), except if the parent of "node"
4177
 * is a sequence as well, in which case "tree" is spliced at the position
4178
 * of "node" in its parent.
4179
 * Return a pointer to the child of the "tree_pos" (filter) child of "tree"
4180
 * in the updated schedule tree.
4181
 */
4182
static __isl_give isl_schedule_node *graft_or_splice(
4183
  __isl_take isl_schedule_node *node, __isl_take isl_schedule_tree *tree,
4184
  int tree_pos)
4185
24
{
4186
24
  int pos;
4187
24
4188
24
  if (isl_schedule_node_get_parent_type(node) ==
4189
24
      isl_schedule_node_sequence) {
4190
0
    pos = isl_schedule_node_get_child_position(node);
4191
0
    node = isl_schedule_node_parent(node);
4192
0
    node = isl_schedule_node_sequence_splice(node, pos, tree);
4193
24
  } else {
4194
24
    pos = 0;
4195
24
    node = isl_schedule_node_graft_tree(node, tree);
4196
24
  }
4197
24
  node = isl_schedule_node_child(node, pos + tree_pos);
4198
24
  node = isl_schedule_node_child(node, 0);
4199
24
4200
24
  return node;
4201
24
}
4202
4203
/* Insert a node "graft" into the schedule tree of "node" such that it
4204
 * is executed before (if "before" is set) or after (if "before" is not set)
4205
 * the node that "node" points to.
4206
 * The root of "graft" is an extension node.
4207
 * Return a pointer to the node that "node" pointed to.
4208
 *
4209
 * We first insert an extension node on top of "node" (or extend
4210
 * the extension node if there already is one), with a filter on "node"
4211
 * separating it from the extension.
4212
 * We then insert a filter in the graft to separate it from the original
4213
 * domain elements and combine the original and new tree in a sequence.
4214
 * If we have extended an extension node, then the children of this
4215
 * sequence are spliced in the sequence of the extended extension
4216
 * at the position where "node" appears in the original extension.
4217
 * Otherwise, the sequence pair is attached to the new extension node.
4218
 */
4219
static __isl_give isl_schedule_node *graft_extension(
4220
  __isl_take isl_schedule_node *node, __isl_take isl_schedule_node *graft,
4221
  int before)
4222
24
{
4223
24
  isl_union_map *extension;
4224
24
  isl_union_set *graft_domain;
4225
24
  isl_union_set *node_domain;
4226
24
  isl_schedule_tree *tree, *tree_graft;
4227
24
4228
24
  extension = isl_schedule_node_extension_get_extension(graft);
4229
24
  graft_domain = isl_union_map_range(isl_union_map_copy(extension));
4230
24
  node_domain = isl_schedule_node_get_universe_domain(node);
4231
24
  node = insert_extension(node, extension);
4232
24
4233
24
  graft_domain = replace_by_universe_if_disjoint(graft_domain,
4234
24
              node_domain);
4235
24
  isl_union_set_free(node_domain);
4236
24
4237
24
  tree = isl_schedule_node_get_tree(node);
4238
24
  if (!isl_schedule_node_has_children(graft)) {
4239
0
    tree_graft = isl_schedule_tree_from_filter(graft_domain);
4240
24
  } else {
4241
24
    graft = isl_schedule_node_child(graft, 0);
4242
24
    tree_graft = isl_schedule_node_get_tree(graft);
4243
24
    tree_graft = isl_schedule_tree_insert_filter(tree_graft,
4244
24
                graft_domain);
4245
24
  }
4246
24
  if (before)
4247
24
    tree = isl_schedule_tree_sequence_pair(tree_graft, tree);
4248
0
  else
4249
0
    tree = isl_schedule_tree_sequence_pair(tree, tree_graft);
4250
24
  node = graft_or_splice(node, tree, before);
4251
24
4252
24
  isl_schedule_node_free(graft);
4253
24
4254
24
  return node;
4255
24
}
4256
4257
/* Replace the root domain node of "node" by an extension node suitable
4258
 * for insertion at "pos".
4259
 * That is, create an extension node that maps the outer band nodes
4260
 * at "pos" to the domain of the root node of "node" and attach
4261
 * the child of this root node to the extension node.
4262
 */
4263
static __isl_give isl_schedule_node *extension_from_domain(
4264
  __isl_take isl_schedule_node *node, __isl_keep isl_schedule_node *pos)
4265
0
{
4266
0
  isl_union_set *universe;
4267
0
  isl_union_set *domain;
4268
0
  isl_union_map *ext;
4269
0
  int depth;
4270
0
  int anchored;
4271
0
  isl_space *space;
4272
0
  isl_schedule_node *res;
4273
0
  isl_schedule_tree *tree;
4274
0
4275
0
  anchored = isl_schedule_node_is_subtree_anchored(node);
4276
0
  if (anchored < 0)
4277
0
    return isl_schedule_node_free(node);
4278
0
  if (anchored)
4279
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_unsupported,
4280
0
      "cannot graft anchored tree with domain root",
4281
0
      return isl_schedule_node_free(node));
4282
0
4283
0
  depth = isl_schedule_node_get_schedule_depth(pos);
4284
0
  domain = isl_schedule_node_domain_get_domain(node);
4285
0
  space = isl_union_set_get_space(domain);
4286
0
  space = isl_space_set_from_params(space);
4287
0
  space = isl_space_add_dims(space, isl_dim_set, depth);
4288
0
  universe = isl_union_set_from_set(isl_set_universe(space));
4289
0
  ext = isl_union_map_from_domain_and_range(universe, domain);
4290
0
  res = isl_schedule_node_from_extension(ext);
4291
0
  node = isl_schedule_node_child(node, 0);
4292
0
  if (!node)
4293
0
    return isl_schedule_node_free(res);
4294
0
  if (!isl_schedule_tree_is_leaf(node->tree)) {
4295
0
    tree = isl_schedule_node_get_tree(node);
4296
0
    res = isl_schedule_node_child(res, 0);
4297
0
    res = isl_schedule_node_graft_tree(res, tree);
4298
0
    res = isl_schedule_node_parent(res);
4299
0
  }
4300
0
  isl_schedule_node_free(node);
4301
0
4302
0
  return res;
4303
0
}
4304
4305
/* Insert a node "graft" into the schedule tree of "node" such that it
4306
 * is executed before (if "before" is set) or after (if "before" is not set)
4307
 * the node that "node" points to.
4308
 * The root of "graft" may be either a domain or an extension node.
4309
 * In the latter case, the domain of the extension needs to correspond
4310
 * to the outer band nodes of "node".
4311
 * The elements of the domain or the range of the extension may not
4312
 * intersect with the domain elements that reach "node".
4313
 * The schedule tree of "graft" may not be anchored.
4314
 *
4315
 * The schedule tree of "node" is modified to include an extension node
4316
 * corresponding to the root node of "graft" as a child of the original
4317
 * parent of "node".  The original node that "node" points to and the
4318
 * child of the root node of "graft" are attached to this extension node
4319
 * through a sequence, with appropriate filters and with the child
4320
 * of "graft" appearing before or after the original "node".
4321
 *
4322
 * If "node" already appears inside a sequence that is the child of
4323
 * an extension node and if the spaces of the new domain elements
4324
 * do not overlap with those of the original domain elements,
4325
 * then that extension node is extended with the new extension
4326
 * rather than introducing a new segment of extension and sequence nodes.
4327
 *
4328
 * Return a pointer to the same node in the modified tree that
4329
 * "node" pointed to in the original tree.
4330
 */
4331
static __isl_give isl_schedule_node *isl_schedule_node_graft_before_or_after(
4332
  __isl_take isl_schedule_node *node, __isl_take isl_schedule_node *graft,
4333
  int before)
4334
24
{
4335
24
  if (!node || !graft)
4336
0
    goto error;
4337
24
  if (check_insert(node) < 0)
4338
0
    goto error;
4339
24
4340
24
  if (isl_schedule_node_get_type(graft) == isl_schedule_node_domain)
4341
0
    graft = extension_from_domain(graft, node);
4342
24
4343
24
  if (!graft)
4344
0
    goto error;
4345
24
  if (isl_schedule_node_get_type(graft) != isl_schedule_node_extension)
4346
24
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
4347
24
      "expecting domain or extension as root of graft",
4348
24
      goto error);
4349
24
4350
24
  return graft_extension(node, graft, before);
4351
0
error:
4352
0
  isl_schedule_node_free(node);
4353
0
  isl_schedule_node_free(graft);
4354
0
  return NULL;
4355
24
}
4356
4357
/* Insert a node "graft" into the schedule tree of "node" such that it
4358
 * is executed before the node that "node" points to.
4359
 * The root of "graft" may be either a domain or an extension node.
4360
 * In the latter case, the domain of the extension needs to correspond
4361
 * to the outer band nodes of "node".
4362
 * The elements of the domain or the range of the extension may not
4363
 * intersect with the domain elements that reach "node".
4364
 * The schedule tree of "graft" may not be anchored.
4365
 *
4366
 * Return a pointer to the same node in the modified tree that
4367
 * "node" pointed to in the original tree.
4368
 */
4369
__isl_give isl_schedule_node *isl_schedule_node_graft_before(
4370
  __isl_take isl_schedule_node *node, __isl_take isl_schedule_node *graft)
4371
24
{
4372
24
  return isl_schedule_node_graft_before_or_after(node, graft, 1);
4373
24
}
4374
4375
/* Insert a node "graft" into the schedule tree of "node" such that it
4376
 * is executed after the node that "node" points to.
4377
 * The root of "graft" may be either a domain or an extension node.
4378
 * In the latter case, the domain of the extension needs to correspond
4379
 * to the outer band nodes of "node".
4380
 * The elements of the domain or the range of the extension may not
4381
 * intersect with the domain elements that reach "node".
4382
 * The schedule tree of "graft" may not be anchored.
4383
 *
4384
 * Return a pointer to the same node in the modified tree that
4385
 * "node" pointed to in the original tree.
4386
 */
4387
__isl_give isl_schedule_node *isl_schedule_node_graft_after(
4388
  __isl_take isl_schedule_node *node,
4389
  __isl_take isl_schedule_node *graft)
4390
0
{
4391
0
  return isl_schedule_node_graft_before_or_after(node, graft, 0);
4392
0
}
4393
4394
/* Split the domain elements that reach "node" into those that satisfy
4395
 * "filter" and those that do not.  Arrange for the first subset to be
4396
 * executed before or after the second subset, depending on the value
4397
 * of "before".
4398
 * Return a pointer to the tree corresponding to the second subset,
4399
 * except when this subset is empty in which case the original pointer
4400
 * is returned.
4401
 * If both subsets are non-empty, then a sequence node is introduced
4402
 * to impose the order.  If the grandparent of the original node was
4403
 * itself a sequence, then the original child is replaced by two children
4404
 * in this sequence instead.
4405
 * The children in the sequence are copies of the original subtree,
4406
 * simplified with respect to their filters.
4407
 */
4408
static __isl_give isl_schedule_node *isl_schedule_node_order_before_or_after(
4409
  __isl_take isl_schedule_node *node, __isl_take isl_union_set *filter,
4410
  int before)
4411
0
{
4412
0
  enum isl_schedule_node_type ancestors[] =
4413
0
    { isl_schedule_node_filter, isl_schedule_node_sequence };
4414
0
  isl_union_set *node_domain, *node_filter = NULL, *parent_filter;
4415
0
  isl_schedule_node *node2;
4416
0
  isl_schedule_tree *tree1, *tree2;
4417
0
  int empty1, empty2;
4418
0
  int in_seq;
4419
0
4420
0
  if (!node || !filter)
4421
0
    goto error;
4422
0
  if (check_insert(node) < 0)
4423
0
    goto error;
4424
0
4425
0
  in_seq = has_ancestors(node, 2, ancestors);
4426
0
  if (in_seq < 0)
4427
0
    goto error;
4428
0
  node_domain = isl_schedule_node_get_domain(node);
4429
0
  filter = isl_union_set_gist(filter, isl_union_set_copy(node_domain));
4430
0
  node_filter = isl_union_set_copy(node_domain);
4431
0
  node_filter = isl_union_set_subtract(node_filter,
4432
0
            isl_union_set_copy(filter));
4433
0
  node_filter = isl_union_set_gist(node_filter, node_domain);
4434
0
  empty1 = isl_union_set_is_empty(filter);
4435
0
  empty2 = isl_union_set_is_empty(node_filter);
4436
0
  if (empty1 < 0 || empty2 < 0)
4437
0
    goto error;
4438
0
  if (empty1 || empty2) {
4439
0
    isl_union_set_free(filter);
4440
0
    isl_union_set_free(node_filter);
4441
0
    return node;
4442
0
  }
4443
0
4444
0
  if (in_seq) {
4445
0
    node = isl_schedule_node_parent(node);
4446
0
    parent_filter = isl_schedule_node_filter_get_filter(node);
4447
0
    node_filter = isl_union_set_intersect(node_filter,
4448
0
              isl_union_set_copy(parent_filter));
4449
0
    filter = isl_union_set_intersect(filter, parent_filter);
4450
0
  }
4451
0
4452
0
  node2 = isl_schedule_node_copy(node);
4453
0
  node = isl_schedule_node_gist(node, isl_union_set_copy(node_filter));
4454
0
  node2 = isl_schedule_node_gist(node2, isl_union_set_copy(filter));
4455
0
  tree1 = isl_schedule_node_get_tree(node);
4456
0
  tree2 = isl_schedule_node_get_tree(node2);
4457
0
  tree1 = isl_schedule_tree_insert_filter(tree1, node_filter);
4458
0
  tree2 = isl_schedule_tree_insert_filter(tree2, filter);
4459
0
  isl_schedule_node_free(node2);
4460
0
4461
0
  if (before) {
4462
0
    tree1 = isl_schedule_tree_sequence_pair(tree2, tree1);
4463
0
    node = graft_or_splice(node, tree1, 1);
4464
0
  } else {
4465
0
    tree1 = isl_schedule_tree_sequence_pair(tree1, tree2);
4466
0
    node = graft_or_splice(node, tree1, 0);
4467
0
  }
4468
0
4469
0
  return node;
4470
0
error:
4471
0
  isl_schedule_node_free(node);
4472
0
  isl_union_set_free(filter);
4473
0
  isl_union_set_free(node_filter);
4474
0
  return NULL;
4475
0
}
4476
4477
/* Split the domain elements that reach "node" into those that satisfy
4478
 * "filter" and those that do not.  Arrange for the first subset to be
4479
 * executed before the second subset.
4480
 * Return a pointer to the tree corresponding to the second subset,
4481
 * except when this subset is empty in which case the original pointer
4482
 * is returned.
4483
 */
4484
__isl_give isl_schedule_node *isl_schedule_node_order_before(
4485
  __isl_take isl_schedule_node *node, __isl_take isl_union_set *filter)
4486
0
{
4487
0
  return isl_schedule_node_order_before_or_after(node, filter, 1);
4488
0
}
4489
4490
/* Split the domain elements that reach "node" into those that satisfy
4491
 * "filter" and those that do not.  Arrange for the first subset to be
4492
 * executed after the second subset.
4493
 * Return a pointer to the tree corresponding to the second subset,
4494
 * except when this subset is empty in which case the original pointer
4495
 * is returned.
4496
 */
4497
__isl_give isl_schedule_node *isl_schedule_node_order_after(
4498
  __isl_take isl_schedule_node *node, __isl_take isl_union_set *filter)
4499
0
{
4500
0
  return isl_schedule_node_order_before_or_after(node, filter, 0);
4501
0
}
4502
4503
/* Reset the user pointer on all identifiers of parameters and tuples
4504
 * in the schedule node "node".
4505
 */
4506
__isl_give isl_schedule_node *isl_schedule_node_reset_user(
4507
  __isl_take isl_schedule_node *node)
4508
0
{
4509
0
  isl_schedule_tree *tree;
4510
0
4511
0
  tree = isl_schedule_node_get_tree(node);
4512
0
  tree = isl_schedule_tree_reset_user(tree);
4513
0
  node = isl_schedule_node_graft_tree(node, tree);
4514
0
4515
0
  return node;
4516
0
}
4517
4518
/* Align the parameters of the schedule node "node" to those of "space".
4519
 */
4520
__isl_give isl_schedule_node *isl_schedule_node_align_params(
4521
  __isl_take isl_schedule_node *node, __isl_take isl_space *space)
4522
0
{
4523
0
  isl_schedule_tree *tree;
4524
0
4525
0
  tree = isl_schedule_node_get_tree(node);
4526
0
  tree = isl_schedule_tree_align_params(tree, space);
4527
0
  node = isl_schedule_node_graft_tree(node, tree);
4528
0
4529
0
  return node;
4530
0
}
4531
4532
/* Compute the pullback of schedule node "node"
4533
 * by the function represented by "upma".
4534
 * In other words, plug in "upma" in the iteration domains
4535
 * of schedule node "node".
4536
 * We currently do not handle expansion nodes.
4537
 *
4538
 * Note that this is only a helper function for
4539
 * isl_schedule_pullback_union_pw_multi_aff.  In order to maintain consistency,
4540
 * this function should not be called on a single node without also
4541
 * calling it on all the other nodes.
4542
 */
4543
__isl_give isl_schedule_node *isl_schedule_node_pullback_union_pw_multi_aff(
4544
  __isl_take isl_schedule_node *node,
4545
  __isl_take isl_union_pw_multi_aff *upma)
4546
1.06k
{
4547
1.06k
  isl_schedule_tree *tree;
4548
1.06k
4549
1.06k
  tree = isl_schedule_node_get_tree(node);
4550
1.06k
  tree = isl_schedule_tree_pullback_union_pw_multi_aff(tree, upma);
4551
1.06k
  node = isl_schedule_node_graft_tree(node, tree);
4552
1.06k
4553
1.06k
  return node;
4554
1.06k
}
4555
4556
/* Internal data structure for isl_schedule_node_expand.
4557
 * "tree" is the tree that needs to be plugged in in all the leaves.
4558
 * "domain" is the set of domain elements in the original leaves
4559
 * to which the tree applies.
4560
 */
4561
struct isl_schedule_expand_data {
4562
  isl_schedule_tree *tree;
4563
  isl_union_set *domain;
4564
};
4565
4566
/* If "node" is a leaf, then plug in data->tree, simplifying it
4567
 * within its new context.
4568
 *
4569
 * If there are any domain elements at the leaf where the tree
4570
 * should not be plugged in (i.e., there are elements not in data->domain)
4571
 * then first extend the tree to only apply to the elements in data->domain
4572
 * by constructing a set node that selects data->tree for elements
4573
 * in data->domain and a leaf for the other elements.
4574
 */
4575
static __isl_give isl_schedule_node *expand(__isl_take isl_schedule_node *node,
4576
  void *user)
4577
0
{
4578
0
  struct isl_schedule_expand_data *data = user;
4579
0
  isl_schedule_tree *tree, *leaf;
4580
0
  isl_union_set *domain, *left;
4581
0
  isl_bool empty;
4582
0
4583
0
  if (isl_schedule_node_get_type(node) != isl_schedule_node_leaf)
4584
0
    return node;
4585
0
4586
0
  domain = isl_schedule_node_get_domain(node);
4587
0
  tree = isl_schedule_tree_copy(data->tree);
4588
0
4589
0
  left = isl_union_set_copy(domain);
4590
0
  left = isl_union_set_subtract(left, isl_union_set_copy(data->domain));
4591
0
  empty = isl_union_set_is_empty(left);
4592
0
  if (empty >= 0 && !empty) {
4593
0
    leaf = isl_schedule_node_get_leaf(node);
4594
0
    leaf = isl_schedule_tree_insert_filter(leaf, left);
4595
0
    left = isl_union_set_copy(data->domain);
4596
0
    tree = isl_schedule_tree_insert_filter(tree, left);
4597
0
    tree = isl_schedule_tree_set_pair(tree, leaf);
4598
0
  } else {
4599
0
    if (empty < 0)
4600
0
      node = isl_schedule_node_free(node);
4601
0
    isl_union_set_free(left);
4602
0
  }
4603
0
4604
0
  node = isl_schedule_node_graft_tree(node, tree);
4605
0
  node = isl_schedule_node_gist(node, domain);
4606
0
4607
0
  return node;
4608
0
}
4609
4610
/* Expand the tree rooted at "node" by extending all leaves
4611
 * with an expansion node with as child "tree".
4612
 * The expansion is determined by "contraction" and "domain".
4613
 * That is, the elements of "domain" are contracted according
4614
 * to "contraction".  The expansion relation is then the inverse
4615
 * of "contraction" with its range intersected with "domain".
4616
 *
4617
 * Insert the appropriate expansion node on top of "tree" and
4618
 * then plug in the result in all leaves of "node".
4619
 */
4620
__isl_give isl_schedule_node *isl_schedule_node_expand(
4621
  __isl_take isl_schedule_node *node,
4622
  __isl_take isl_union_pw_multi_aff *contraction,
4623
  __isl_take isl_union_set *domain,
4624
  __isl_take isl_schedule_tree *tree)
4625
0
{
4626
0
  struct isl_schedule_expand_data data;
4627
0
  isl_union_map *expansion;
4628
0
  isl_union_pw_multi_aff *copy;
4629
0
4630
0
  if (!node || !contraction || !tree)
4631
0
    node = isl_schedule_node_free(node);
4632
0
4633
0
  copy = isl_union_pw_multi_aff_copy(contraction);
4634
0
  expansion = isl_union_map_from_union_pw_multi_aff(copy);
4635
0
  expansion = isl_union_map_reverse(expansion);
4636
0
  expansion = isl_union_map_intersect_range(expansion, domain);
4637
0
  data.domain = isl_union_map_domain(isl_union_map_copy(expansion));
4638
0
4639
0
  tree = isl_schedule_tree_insert_expansion(tree, contraction, expansion);
4640
0
  data.tree = tree;
4641
0
4642
0
  node = isl_schedule_node_map_descendant_bottom_up(node, &expand, &data);
4643
0
  isl_union_set_free(data.domain);
4644
0
  isl_schedule_tree_free(data.tree);
4645
0
  return node;
4646
0
}
4647
4648
/* Return the position of the subtree containing "node" among the children
4649
 * of "ancestor".  "node" is assumed to be a descendant of "ancestor".
4650
 * In particular, both nodes should point to the same schedule tree.
4651
 *
4652
 * Return -1 on error.
4653
 */
4654
int isl_schedule_node_get_ancestor_child_position(
4655
  __isl_keep isl_schedule_node *node,
4656
  __isl_keep isl_schedule_node *ancestor)
4657
64.0k
{
4658
64.0k
  int n1, n2;
4659
64.0k
  isl_schedule_tree *tree;
4660
64.0k
4661
64.0k
  if (!node || !ancestor)
4662
0
    return -1;
4663
64.0k
4664
64.0k
  if (node->schedule != ancestor->schedule)
4665
64.0k
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
4666
64.0k
      "not a descendant", return -1);
4667
64.0k
4668
64.0k
  n1 = isl_schedule_node_get_tree_depth(ancestor);
4669
64.0k
  n2 = isl_schedule_node_get_tree_depth(node);
4670
64.0k
4671
64.0k
  if (n1 >= n2)
4672
64.0k
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
4673
64.0k
      "not a descendant", return -1);
4674
64.0k
  tree = isl_schedule_tree_list_get_schedule_tree(node->ancestors, n1);
4675
64.0k
  isl_schedule_tree_free(tree);
4676
64.0k
  if (tree != ancestor->tree)
4677
64.0k
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
4678
64.0k
      "not a descendant", return -1);
4679
64.0k
4680
64.0k
  return node->child_pos[n1];
4681
64.0k
}
4682
4683
/* Given two nodes that point to the same schedule tree, return their
4684
 * closest shared ancestor.
4685
 *
4686
 * Since the two nodes point to the same schedule, they share at least
4687
 * one ancestor, the root of the schedule.  We move down from the root
4688
 * to the first ancestor where the respective children have a different
4689
 * child position.  This is the requested ancestor.
4690
 * If there is no ancestor where the children have a different position,
4691
 * then one node is an ancestor of the other and then this node is
4692
 * the requested ancestor.
4693
 */
4694
__isl_give isl_schedule_node *isl_schedule_node_get_shared_ancestor(
4695
  __isl_keep isl_schedule_node *node1,
4696
  __isl_keep isl_schedule_node *node2)
4697
62.9k
{
4698
62.9k
  int i, n1, n2;
4699
62.9k
4700
62.9k
  if (!node1 || !node2)
4701
0
    return NULL;
4702
62.9k
  if (node1->schedule != node2->schedule)
4703
62.9k
    
isl_die0
(isl_schedule_node_get_ctx(node1), isl_error_invalid,
4704
62.9k
      "not part of same schedule", return NULL);
4705
62.9k
  n1 = isl_schedule_node_get_tree_depth(node1);
4706
62.9k
  n2 = isl_schedule_node_get_tree_depth(node2);
4707
62.9k
  if (n2 < n1)
4708
2.48k
    return isl_schedule_node_get_shared_ancestor(node2, node1);
4709
60.5k
  if (n1 == 0)
4710
0
    return isl_schedule_node_copy(node1);
4711
60.5k
  if (isl_schedule_node_is_equal(node1, node2))
4712
28.4k
    return isl_schedule_node_copy(node1);
4713
32.0k
4714
80.9k
  
for (i = 0; 32.0k
i < n1;
++i48.9k
)
4715
80.9k
    if (node1->child_pos[i] != node2->child_pos[i])
4716
32.0k
      break;
4717
32.0k
4718
32.0k
  node1 = isl_schedule_node_copy(node1);
4719
32.0k
  return isl_schedule_node_ancestor(node1, n1 - i);
4720
32.0k
}
4721
4722
/* Print "node" to "p".
4723
 */
4724
__isl_give isl_printer *isl_printer_print_schedule_node(
4725
  __isl_take isl_printer *p, __isl_keep isl_schedule_node *node)
4726
0
{
4727
0
  if (!node)
4728
0
    return isl_printer_free(p);
4729
0
  return isl_printer_print_schedule_tree_mark(p, node->schedule->root,
4730
0
      isl_schedule_tree_list_n_schedule_tree(node->ancestors),
4731
0
      node->child_pos);
4732
0
}
4733
4734
void isl_schedule_node_dump(__isl_keep isl_schedule_node *node)
4735
0
{
4736
0
  isl_ctx *ctx;
4737
0
  isl_printer *printer;
4738
0
4739
0
  if (!node)
4740
0
    return;
4741
0
4742
0
  ctx = isl_schedule_node_get_ctx(node);
4743
0
  printer = isl_printer_to_file(ctx, stderr);
4744
0
  printer = isl_printer_set_yaml_style(printer, ISL_YAML_STYLE_BLOCK);
4745
0
  printer = isl_printer_print_schedule_node(printer, node);
4746
0
4747
0
  isl_printer_free(printer);
4748
0
}
4749
4750
/* Return a string representation of "node".
4751
 * Print the schedule node in block format as it would otherwise
4752
 * look identical to the entire schedule.
4753
 */
4754
__isl_give char *isl_schedule_node_to_str(__isl_keep isl_schedule_node *node)
4755
0
{
4756
0
  isl_printer *printer;
4757
0
  char *s;
4758
0
4759
0
  if (!node)
4760
0
    return NULL;
4761
0
4762
0
  printer = isl_printer_to_str(isl_schedule_node_get_ctx(node));
4763
0
  printer = isl_printer_set_yaml_style(printer, ISL_YAML_STYLE_BLOCK);
4764
0
  printer = isl_printer_print_schedule_node(printer, node);
4765
0
  s = isl_printer_get_str(printer);
4766
0
  isl_printer_free(printer);
4767
0
4768
0
  return s;
4769
0
}