Coverage Report

Created: 2018-02-20 12:54

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/tools/polly/lib/External/isl/isl_schedule_node.c
Line
Count
Source
1
/*
2
 * Copyright 2013-2014 Ecole Normale Superieure
3
 * Copyright 2014      INRIA Rocquencourt
4
 * Copyright 2016      Sven Verdoolaege
5
 *
6
 * Use of this software is governed by the MIT license
7
 *
8
 * Written by Sven Verdoolaege,
9
 * Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
10
 * and Inria Paris - Rocquencourt, Domaine de Voluceau - Rocquencourt,
11
 * B.P. 105 - 78153 Le Chesnay, France
12
 */
13
14
#include <isl/val.h>
15
#include <isl/space.h>
16
#include <isl/set.h>
17
#include <isl_schedule_band.h>
18
#include <isl_schedule_private.h>
19
#include <isl_schedule_node_private.h>
20
21
/* Create a new schedule node in the given schedule, pointing at the given
22
 * tree with given ancestors and child positions.
23
 * "child_pos" may be NULL if there are no ancestors.
24
 */
25
__isl_give isl_schedule_node *isl_schedule_node_alloc(
26
  __isl_take isl_schedule *schedule, __isl_take isl_schedule_tree *tree,
27
  __isl_take isl_schedule_tree_list *ancestors, int *child_pos)
28
61.3k
{
29
61.3k
  isl_ctx *ctx;
30
61.3k
  isl_schedule_node *node;
31
61.3k
  int i, n;
32
61.3k
33
61.3k
  if (!schedule || !tree || !ancestors)
34
0
    goto error;
35
61.3k
  n = isl_schedule_tree_list_n_schedule_tree(ancestors);
36
61.3k
  if (n > 0 && !child_pos)
37
0
    goto error;
38
61.3k
  ctx = isl_schedule_get_ctx(schedule);
39
61.3k
  node = isl_calloc_type(ctx, isl_schedule_node);
40
61.3k
  if (!node)
41
0
    goto error;
42
61.3k
  node->ref = 1;
43
61.3k
  node->schedule = schedule;
44
61.3k
  node->tree = tree;
45
61.3k
  node->ancestors = ancestors;
46
61.3k
  node->child_pos = isl_alloc_array(ctx, int, n);
47
61.3k
  if (n && !node->child_pos)
48
0
    return isl_schedule_node_free(node);
49
252k
  for (i = 0; i < n; ++i)
50
191k
    node->child_pos[i] = child_pos[i];
51
61.3k
52
61.3k
  return node;
53
0
error:
54
0
  isl_schedule_free(schedule);
55
0
  isl_schedule_tree_free(tree);
56
0
  isl_schedule_tree_list_free(ancestors);
57
0
  return NULL;
58
61.3k
}
59
60
/* Return a pointer to the root of a schedule tree whose single
61
 * node is a domain node with the given domain.
62
 */
63
__isl_give isl_schedule_node *isl_schedule_node_from_domain(
64
  __isl_take isl_union_set *domain)
65
126
{
66
126
  isl_schedule *schedule;
67
126
  isl_schedule_node *node;
68
126
69
126
  schedule = isl_schedule_from_domain(domain);
70
126
  node = isl_schedule_get_root(schedule);
71
126
  isl_schedule_free(schedule);
72
126
73
126
  return node;
74
126
}
75
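As a usage sketch of the constructor above (assuming the public isl headers and isl_union_set_read_from_str; the domain string and the helper name are invented for illustration):

#include <stdio.h>
#include <isl/ctx.h>
#include <isl/union_set.h>
#include <isl/schedule_node.h>

/* Sketch: create a root node from a (hypothetical) domain and
 * check that it is indeed a domain node.
 */
static void example_from_domain(isl_ctx *ctx)
{
  isl_union_set *domain;
  isl_schedule_node *node;

  domain = isl_union_set_read_from_str(ctx, "{ S[i] : 0 <= i < 10 }");
  node = isl_schedule_node_from_domain(domain);
  if (isl_schedule_node_get_type(node) == isl_schedule_node_domain)
    printf("root is a domain node\n");
  isl_schedule_node_free(node);
}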
76
/* Return a pointer to the root of a schedule tree whose single
77
 * node is an extension node with the given extension.
78
 */
79
__isl_give isl_schedule_node *isl_schedule_node_from_extension(
80
  __isl_take isl_union_map *extension)
81
24
{
82
24
  isl_ctx *ctx;
83
24
  isl_schedule *schedule;
84
24
  isl_schedule_tree *tree;
85
24
  isl_schedule_node *node;
86
24
87
24
  if (!extension)
88
0
    return NULL;
89
24
90
24
  ctx = isl_union_map_get_ctx(extension);
91
24
  tree = isl_schedule_tree_from_extension(extension);
92
24
  schedule = isl_schedule_from_schedule_tree(ctx, tree);
93
24
  node = isl_schedule_get_root(schedule);
94
24
  isl_schedule_free(schedule);
95
24
96
24
  return node;
97
24
}
98
99
/* Return the isl_ctx to which "node" belongs.
100
 */
101
isl_ctx *isl_schedule_node_get_ctx(__isl_keep isl_schedule_node *node)
102
43.9k
{
103
43.9k
  return node ? isl_schedule_get_ctx(node->schedule) : NULL;
104
43.9k
}
105
106
/* Return a pointer to the leaf of the schedule into which "node" points.
107
 */
108
__isl_keep isl_schedule_tree *isl_schedule_node_peek_leaf(
109
  __isl_keep isl_schedule_node *node)
110
21.3k
{
111
21.3k
  return node ? isl_schedule_peek_leaf(node->schedule) : NULL;
112
21.3k
}
113
114
/* Return a copy of the leaf of the schedule into which "node" points.
115
 */
116
__isl_give isl_schedule_tree *isl_schedule_node_get_leaf(
117
  __isl_keep isl_schedule_node *node)
118
19.7k
{
119
19.7k
  return isl_schedule_tree_copy(isl_schedule_node_peek_leaf(node));
120
19.7k
}
121
122
/* Return the type of the node or isl_schedule_node_error on error.
123
 */
124
enum isl_schedule_node_type isl_schedule_node_get_type(
125
  __isl_keep isl_schedule_node *node)
126
117k
{
127
117k
  return node ? isl_schedule_tree_get_type(node->tree)
128
117k
        : isl_schedule_node_error;
129
117k
}
130
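Purely as an illustration of the accessor above, a caller would typically dispatch on the returned type; the cases shown and the helper name are arbitrary.

#include <isl/ctx.h>
#include <isl/schedule_node.h>

/* Sketch: classify a node by its type.  Only a few types are
 * distinguished; everything else falls through to the default case.
 */
static const char *node_kind(__isl_keep isl_schedule_node *node)
{
  switch (isl_schedule_node_get_type(node)) {
  case isl_schedule_node_error:
    return "error";
  case isl_schedule_node_domain:
    return "domain";
  case isl_schedule_node_band:
    return "band";
  case isl_schedule_node_leaf:
    return "leaf";
  default:
    return "other";
  }
}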
131
/* Return the type of the parent of "node" or isl_schedule_node_error on error.
132
 */
133
enum isl_schedule_node_type isl_schedule_node_get_parent_type(
134
  __isl_keep isl_schedule_node *node)
135
3.17k
{
136
3.17k
  int pos;
137
3.17k
  int has_parent;
138
3.17k
  isl_schedule_tree *parent;
139
3.17k
  enum isl_schedule_node_type type;
140
3.17k
141
3.17k
  if (!node)
142
0
    return isl_schedule_node_error;
143
3.17k
  has_parent = isl_schedule_node_has_parent(node);
144
3.17k
  if (has_parent < 0)
145
0
    return isl_schedule_node_error;
146
3.17k
  if (!has_parent)
147
3.17k
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
148
3.17k
      "node has no parent", return isl_schedule_node_error);
149
3.17k
150
3.17k
  pos = isl_schedule_tree_list_n_schedule_tree(node->ancestors) - 1;
151
3.17k
  parent = isl_schedule_tree_list_get_schedule_tree(node->ancestors, pos);
152
3.17k
  type = isl_schedule_tree_get_type(parent);
153
3.17k
  isl_schedule_tree_free(parent);
154
3.17k
155
3.17k
  return type;
156
3.17k
}
157
158
/* Return a copy of the subtree that this node points to.
159
 */
160
__isl_give isl_schedule_tree *isl_schedule_node_get_tree(
161
  __isl_keep isl_schedule_node *node)
162
6.13k
{
163
6.13k
  if (!node)
164
0
    return NULL;
165
6.13k
166
6.13k
  return isl_schedule_tree_copy(node->tree);
167
6.13k
}
168
169
/* Return a copy of the schedule into which "node" points.
170
 */
171
__isl_give isl_schedule *isl_schedule_node_get_schedule(
172
  __isl_keep isl_schedule_node *node)
173
6.27k
{
174
6.27k
  if (!node)
175
0
    return NULL;
176
6.27k
  return isl_schedule_copy(node->schedule);
177
6.27k
}
178
179
/* Return a fresh copy of "node".
180
 */
181
__isl_take isl_schedule_node *isl_schedule_node_dup(
182
  __isl_keep isl_schedule_node *node)
183
46.5k
{
184
46.5k
  if (!node)
185
0
    return NULL;
186
46.5k
187
46.5k
  return isl_schedule_node_alloc(isl_schedule_copy(node->schedule),
188
46.5k
        isl_schedule_tree_copy(node->tree),
189
46.5k
        isl_schedule_tree_list_copy(node->ancestors),
190
46.5k
        node->child_pos);
191
46.5k
}
192
193
/* Return an isl_schedule_node that is equal to "node" and that has only
194
 * a single reference.
195
 */
196
__isl_give isl_schedule_node *isl_schedule_node_cow(
197
  __isl_take isl_schedule_node *node)
198
131k
{
199
131k
  if (!node)
200
0
    return NULL;
201
131k
202
131k
  if (node->ref == 1)
203
84.8k
    return node;
204
46.5k
  node->ref--;
205
46.5k
  return isl_schedule_node_dup(node);
206
46.5k
}
207
208
/* Return a new reference to "node".
209
 */
210
__isl_give isl_schedule_node *isl_schedule_node_copy(
211
  __isl_keep isl_schedule_node *node)
212
116k
{
213
116k
  if (!node)
214
12.8k
    return NULL;
215
103k
216
103k
  node->ref++;
217
103k
  return node;
218
103k
}
219
220
/* Free "node" and return NULL.
221
 */
222
__isl_null isl_schedule_node *isl_schedule_node_free(
223
  __isl_take isl_schedule_node *node)
224
134k
{
225
134k
  if (!node)
226
15.4k
    return NULL;
227
118k
  if (--node->ref > 0)
228
57.3k
    return NULL;
229
61.3k
230
61.3k
  isl_schedule_tree_list_free(node->ancestors);
231
61.3k
  free(node->child_pos);
232
61.3k
  isl_schedule_tree_free(node->tree);
233
61.3k
  isl_schedule_free(node->schedule);
234
61.3k
  free(node);
235
61.3k
236
61.3k
  return NULL;
237
61.3k
}
238
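A minimal sketch of the __isl_take/__isl_keep/__isl_give reference-counting convention used throughout this file: a borrowed node can be kept alive by taking an extra reference, which must later be released; the helper name is invented.

#include <isl/ctx.h>
#include <isl/schedule_node.h>

/* Sketch: "node" is only borrowed here, so take an extra reference
 * before using it independently and drop that reference afterwards.
 */
static void share_node(__isl_keep isl_schedule_node *node)
{
  isl_schedule_node *extra;

  extra = isl_schedule_node_copy(node); /* increments the ref count */
  /* ... use "extra" as if it were an independent copy ... */
  isl_schedule_node_free(extra);        /* drops only this reference */
}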
239
/* Do "node1" and "node2" point to the same position in the same
240
 * schedule?
241
 */
242
isl_bool isl_schedule_node_is_equal(__isl_keep isl_schedule_node *node1,
243
  __isl_keep isl_schedule_node *node2)
244
60.3k
{
245
60.3k
  int i, n1, n2;
246
60.3k
247
60.3k
  if (!node1 || !node2)
248
0
    return isl_bool_error;
249
60.3k
  if (node1 == node2)
250
28.2k
    return isl_bool_true;
251
32.0k
  if (node1->schedule != node2->schedule)
252
0
    return isl_bool_false;
253
32.0k
254
32.0k
  n1 = isl_schedule_node_get_tree_depth(node1);
255
32.0k
  n2 = isl_schedule_node_get_tree_depth(node2);
256
32.0k
  if (n1 != n2)
257
6.73k
    return isl_bool_false;
258
59.5k
  for (i = 0; i < n1; ++i)
259
59.5k
    if (node1->child_pos[i] != node2->child_pos[i])
260
25.3k
      return isl_bool_false;
261
25.3k
262
25.3k
  return isl_bool_true;
263
25.3k
}
264
265
/* Return the number of outer schedule dimensions of "node"
266
 * in its schedule tree.
267
 *
268
 * Return -1 on error.
269
 */
270
int isl_schedule_node_get_schedule_depth(__isl_keep isl_schedule_node *node)
271
62.7k
{
272
62.7k
  int i, n;
273
62.7k
  int depth = 0;
274
62.7k
275
62.7k
  if (!node)
276
0
    return -1;
277
62.7k
278
62.7k
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
279
242k
  for (i = n - 1; i >= 0; --i) {
280
179k
    isl_schedule_tree *tree;
281
179k
282
179k
    tree = isl_schedule_tree_list_get_schedule_tree(
283
179k
                node->ancestors, i);
284
179k
    if (!tree)
285
0
      return -1;
286
179k
    if (tree->type == isl_schedule_node_band)
287
60.7k
      depth += isl_schedule_tree_band_n_member(tree);
288
179k
    isl_schedule_tree_free(tree);
289
179k
  }
290
62.7k
291
62.7k
  return depth;
292
62.7k
}
293
294
/* Internal data structure for
295
 * isl_schedule_node_get_prefix_schedule_union_pw_multi_aff
296
 *
297
 * "initialized" is set if the filter field has been initialized.
298
 * If "universe_domain" is not set, then the collected filter is intersected
299
 * with the domain of the root domain node.
300
 * "universe_filter" is set if we are only collecting the universes of filters
301
 * "collect_prefix" is set if we are collecting prefixes.
302
 * "filter" collects all outer filters and is NULL until "initialized" is set.
303
 * "prefix" collects all outer band partial schedules (if "collect_prefix"
304
 * is set).  If it is used, then it is initialized by the caller
305
 * of collect_filter_prefix to a zero-dimensional function.
306
 */
307
struct isl_schedule_node_get_filter_prefix_data {
308
  int initialized;
309
  int universe_domain;
310
  int universe_filter;
311
  int collect_prefix;
312
  isl_union_set *filter;
313
  isl_multi_union_pw_aff *prefix;
314
};
315
316
static int collect_filter_prefix(__isl_keep isl_schedule_tree_list *list,
317
  int n, struct isl_schedule_node_get_filter_prefix_data *data);
318
319
/* Update the filter and prefix information in "data" based on the first "n"
320
 * elements in "list" and the expansion tree root "tree".
321
 *
322
 * We first collect the information from the elements in "list",
323
 * initializing the filter based on the domain of the expansion.
324
 * Then we map the results to the expanded space and combine them
325
 * with the results already in "data".
326
 */
327
static int collect_filter_prefix_expansion(__isl_take isl_schedule_tree *tree,
328
  __isl_keep isl_schedule_tree_list *list, int n,
329
  struct isl_schedule_node_get_filter_prefix_data *data)
330
6
{
331
6
  struct isl_schedule_node_get_filter_prefix_data contracted;
332
6
  isl_union_pw_multi_aff *c;
333
6
  isl_union_map *exp, *universe;
334
6
  isl_union_set *filter;
335
6
336
6
  c = isl_schedule_tree_expansion_get_contraction(tree);
337
6
  exp = isl_schedule_tree_expansion_get_expansion(tree);
338
6
339
6
  contracted.initialized = 1;
340
6
  contracted.universe_domain = data->universe_domain;
341
6
  contracted.universe_filter = data->universe_filter;
342
6
  contracted.collect_prefix = data->collect_prefix;
343
6
  universe = isl_union_map_universe(isl_union_map_copy(exp));
344
6
  filter = isl_union_map_domain(universe);
345
6
  if (data->collect_prefix) {
346
3
    isl_space *space = isl_union_set_get_space(filter);
347
3
    space = isl_space_set_from_params(space);
348
3
    contracted.prefix = isl_multi_union_pw_aff_zero(space);
349
3
  }
350
6
  contracted.filter = filter;
351
6
352
6
  if (collect_filter_prefix(list, n, &contracted) < 0)
353
0
    contracted.filter = isl_union_set_free(contracted.filter);
354
6
  if (data->collect_prefix) {
355
3
    isl_multi_union_pw_aff *prefix;
356
3
357
3
    prefix = contracted.prefix;
358
3
    prefix =
359
3
        isl_multi_union_pw_aff_pullback_union_pw_multi_aff(prefix,
360
3
            isl_union_pw_multi_aff_copy(c));
361
3
    data->prefix = isl_multi_union_pw_aff_flat_range_product(
362
3
            prefix, data->prefix);
363
3
  }
364
6
  filter = contracted.filter;
365
6
  if (data->universe_domain)
366
3
    filter = isl_union_set_preimage_union_pw_multi_aff(filter,
367
3
            isl_union_pw_multi_aff_copy(c));
368
3
  else
369
3
    filter = isl_union_set_apply(filter, isl_union_map_copy(exp));
370
6
  if (!data->initialized)
371
1
    data->filter = filter;
372
5
  else
373
5
    data->filter = isl_union_set_intersect(filter, data->filter);
374
6
  data->initialized = 1;
375
6
376
6
  isl_union_pw_multi_aff_free(c);
377
6
  isl_union_map_free(exp);
378
6
  isl_schedule_tree_free(tree);
379
6
380
6
  return 0;
381
6
}
382
383
/* Update the filter information in "data" based on the first "n"
384
 * elements in "list" and the extension tree root "tree", in case
385
 * data->universe_domain is set and data->collect_prefix is not.
386
 *
387
 * We collect the universe domain of the elements in "list" and
388
 * add it to the universe range of the extension (intersected
389
 * with the already collected filter, if any).
390
 */
391
static int collect_universe_domain_extension(__isl_take isl_schedule_tree *tree,
392
  __isl_keep isl_schedule_tree_list *list, int n,
393
  struct isl_schedule_node_get_filter_prefix_data *data)
394
12
{
395
12
  struct isl_schedule_node_get_filter_prefix_data data_outer;
396
12
  isl_union_map *extension;
397
12
  isl_union_set *filter;
398
12
399
12
  data_outer.initialized = 0;
400
12
  data_outer.universe_domain = 1;
401
12
  data_outer.universe_filter = data->universe_filter;
402
12
  data_outer.collect_prefix = 0;
403
12
  data_outer.filter = NULL;
404
12
  data_outer.prefix = NULL;
405
12
406
12
  if (collect_filter_prefix(list, n, &data_outer) < 0)
407
0
    data_outer.filter = isl_union_set_free(data_outer.filter);
408
12
409
12
  extension = isl_schedule_tree_extension_get_extension(tree);
410
12
  extension = isl_union_map_universe(extension);
411
12
  filter = isl_union_map_range(extension);
412
12
  if (data_outer.initialized)
413
12
    filter = isl_union_set_union(filter, data_outer.filter);
414
12
  if (data->initialized)
415
12
    filter = isl_union_set_intersect(filter, data->filter);
416
12
417
12
  data->filter = filter;
418
12
419
12
  isl_schedule_tree_free(tree);
420
12
421
12
  return 0;
422
12
}
423
424
/* Update "data" based on the tree node "tree" in case "data" has
425
 * not been initialized yet.
426
 *
427
 * Return 0 on success and -1 on error.
428
 *
429
 * If "tree" is a filter, then we set data->filter to this filter
430
 * (or its universe).
431
 * If "tree" is a domain, then this means we have reached the root
432
 * of the schedule tree without being able to extract any information.
433
 * We therefore initialize data->filter to the universe of the domain,
434
 * or the domain itself if data->universe_domain is not set.
435
 * If "tree" is a band with at least one member, then we set data->filter
436
 * to the universe of the schedule domain and replace the zero-dimensional
437
 * data->prefix by the band schedule (if data->collect_prefix is set).
438
 */
439
static int collect_filter_prefix_init(__isl_keep isl_schedule_tree *tree,
440
  struct isl_schedule_node_get_filter_prefix_data *data)
441
8.09k
{
442
8.09k
  enum isl_schedule_node_type type;
443
8.09k
  isl_multi_union_pw_aff *mupa;
444
8.09k
  isl_union_set *filter;
445
8.09k
446
8.09k
  type = isl_schedule_tree_get_type(tree);
447
8.09k
  switch (type) {
448
8.09k
  case isl_schedule_node_error:
449
0
    return -1;
450
8.09k
  case isl_schedule_node_expansion:
451
0
    isl_die(isl_schedule_tree_get_ctx(tree), isl_error_internal,
452
0
      "should be handled by caller", return -1);
453
0
  case isl_schedule_node_extension:
454
0
    isl_die(isl_schedule_tree_get_ctx(tree), isl_error_invalid,
455
0
      "cannot handle extension nodes", return -1);
456
15
  case isl_schedule_node_context:
457
15
  case isl_schedule_node_leaf:
458
15
  case isl_schedule_node_guard:
459
15
  case isl_schedule_node_mark:
460
15
  case isl_schedule_node_sequence:
461
15
  case isl_schedule_node_set:
462
15
    return 0;
463
316
  case isl_schedule_node_domain:
464
316
    filter = isl_schedule_tree_domain_get_domain(tree);
465
316
    if (data->universe_domain)
466
174
      filter = isl_union_set_universe(filter);
467
316
    data->filter = filter;
468
316
    break;
469
4.63k
  case isl_schedule_node_band:
470
4.63k
    if (isl_schedule_tree_band_n_member(tree) == 0)
471
0
      return 0;
472
4.63k
    mupa = isl_schedule_tree_band_get_partial_schedule(tree);
473
4.63k
    if (data->collect_prefix) {
474
2.30k
      isl_multi_union_pw_aff_free(data->prefix);
475
2.30k
      mupa = isl_multi_union_pw_aff_reset_tuple_id(mupa,
476
2.30k
                isl_dim_set);
477
2.30k
      data->prefix = isl_multi_union_pw_aff_copy(mupa);
478
2.30k
    }
479
4.63k
    filter = isl_multi_union_pw_aff_domain(mupa);
480
4.63k
    filter = isl_union_set_universe(filter);
481
4.63k
    data->filter = filter;
482
4.63k
    break;
483
4.63k
  case isl_schedule_node_filter:
484
3.12k
    filter = isl_schedule_tree_filter_get_filter(tree);
485
3.12k
    if (data->universe_filter)
486
1.56k
      filter = isl_union_set_universe(filter);
487
3.12k
    data->filter = filter;
488
3.12k
    break;
489
8.07k
  }
490
8.07k
491
8.07k
  if ((data->collect_prefix && !data->prefix) || !data->filter)
492
0
    return -1;
493
8.07k
494
8.07k
  data->initialized = 1;
495
8.07k
496
8.07k
  return 0;
497
8.07k
}
498
499
/* Update "data" based on the tree node "tree" in case "data" has
500
 * already been initialized.
501
 *
502
 * Return 0 on success and -1 on error.
503
 *
504
 * If "tree" is a domain and data->universe_domain is not set, then
505
 * intersect data->filter with the domain.
506
 * If "tree" is a filter, then we intersect data->filter with this filter
507
 * (or its universe).
508
 * If "tree" is a band with at least one member and data->collect_prefix
509
 * is set, then we extend data->prefix with the band schedule.
510
 * If "tree" is an extension, then we make sure that we are not collecting
511
 * information on any extended domain elements.
512
 */
513
static int collect_filter_prefix_update(__isl_keep isl_schedule_tree *tree,
514
  struct isl_schedule_node_get_filter_prefix_data *data)
515
20.4k
{
516
20.4k
  enum isl_schedule_node_type type;
517
20.4k
  isl_multi_union_pw_aff *mupa;
518
20.4k
  isl_union_set *filter;
519
20.4k
  isl_union_map *extension;
520
20.4k
  int empty;
521
20.4k
522
20.4k
  type = isl_schedule_tree_get_type(tree);
523
20.4k
  switch (type) {
524
20.4k
  case isl_schedule_node_error:
525
0
    return -1;
526
20.4k
  case isl_schedule_node_expansion:
527
0
    isl_die(isl_schedule_tree_get_ctx(tree), isl_error_internal,
528
0
      "should be handled by caller", return -1);
529
12
  case isl_schedule_node_extension:
530
12
    extension = isl_schedule_tree_extension_get_extension(tree);
531
12
    extension = isl_union_map_intersect_range(extension,
532
12
          isl_union_set_copy(data->filter));
533
12
    empty = isl_union_map_is_empty(extension);
534
12
    isl_union_map_free(extension);
535
12
    if (empty < 0)
536
0
      return -1;
537
12
    if (empty)
538
12
      break;
539
0
    isl_die(isl_schedule_tree_get_ctx(tree), isl_error_invalid,
540
0
      "cannot handle extension nodes", return -1);
541
5.60k
  case isl_schedule_node_context:
542
5.60k
  case isl_schedule_node_leaf:
543
5.60k
  case isl_schedule_node_guard:
544
5.60k
  case isl_schedule_node_mark:
545
5.60k
  case isl_schedule_node_sequence:
546
5.60k
  case isl_schedule_node_set:
547
5.60k
    break;
548
7.75k
  case isl_schedule_node_domain:
549
7.75k
    if (data->universe_domain)
550
3.87k
      break;
551
3.87k
    filter = isl_schedule_tree_domain_get_domain(tree);
552
3.87k
    data->filter = isl_union_set_intersect(data->filter, filter);
553
3.87k
    break;
554
4.82k
  case isl_schedule_node_band:
555
4.82k
    if (isl_schedule_tree_band_n_member(tree) == 0)
556
0
      break;
557
4.82k
    if (!data->collect_prefix)
558
2.37k
      break;
559
2.44k
    mupa = isl_schedule_tree_band_get_partial_schedule(tree);
560
2.44k
    data->prefix = isl_multi_union_pw_aff_flat_range_product(mupa,
561
2.44k
                data->prefix);
562
2.44k
    if (!data->prefix)
563
0
      return -1;
564
2.44k
    break;
565
2.44k
  case isl_schedule_node_filter:
566
2.20k
    filter = isl_schedule_tree_filter_get_filter(tree);
567
2.20k
    if (data->universe_filter)
568
1.09k
      filter = isl_union_set_universe(filter);
569
2.20k
    data->filter = isl_union_set_intersect(data->filter, filter);
570
2.20k
    if (!data->filter)
571
0
      return -1;
572
2.20k
    break;
573
20.4k
  }
574
20.4k
575
20.4k
  return 0;
576
20.4k
}
577
578
/* Collect filter and/or prefix information from the first "n"
579
 * elements in "list" (which represent the ancestors of a node).
580
 * Store the results in "data".
581
 *
582
 * Extension nodes are only supported if they do not affect the outcome,
583
 * i.e., if we are collecting information on non-extended domain elements,
584
 * or if we are collecting the universe domain (without prefix).
585
 *
586
 * Return 0 on success and -1 on error.
587
 *
588
 * We traverse the list from innermost ancestor (last element)
589
 * to outermost ancestor (first element), calling collect_filter_prefix_init
590
 * on each node as long as we have not been able to extract any information
591
 * yet and collect_filter_prefix_update afterwards.
592
 * If we come across an expansion node, then we interrupt the traversal
593
 * and call collect_filter_prefix_expansion to restart the traversal
594
 * over the remaining ancestors and to combine the results with those
595
 * that have already been collected.
596
 * If we come across an extension node and we are only computing
597
 * the universe domain, then we interrupt the traversal and call
598
 * collect_universe_domain_extension to restart the traversal
599
 * over the remaining ancestors and to combine the results with those
600
 * that have already been collected.
601
 * On successful return, data->initialized will be set since the outermost
602
 * ancestor is a domain node, which always results in an initialization.
603
 */
604
static int collect_filter_prefix(__isl_keep isl_schedule_tree_list *list,
605
  int n, struct isl_schedule_node_get_filter_prefix_data *data)
606
8.08k
{
607
8.08k
  int i;
608
8.08k
609
8.08k
  if (!list)
610
0
    return -1;
611
8.08k
612
36.5k
  for (i = n - 1; i >= 0; --i) {
613
28.5k
    isl_schedule_tree *tree;
614
28.5k
    enum isl_schedule_node_type type;
615
28.5k
    int r;
616
28.5k
617
28.5k
    tree = isl_schedule_tree_list_get_schedule_tree(list, i);
618
28.5k
    if (!tree)
619
0
      return -1;
620
28.5k
    type = isl_schedule_tree_get_type(tree);
621
28.5k
    if (type == isl_schedule_node_expansion)
622
6
      return collect_filter_prefix_expansion(tree, list, i,
623
6
                data);
624
28.5k
    if (type == isl_schedule_node_extension &&
625
28.5k
        data->universe_domain && !data->collect_prefix)
626
12
      return collect_universe_domain_extension(tree, list, i,
627
12
                data);
628
28.4k
    if (!data->initialized)
629
8.09k
      r = collect_filter_prefix_init(tree, data);
630
20.4k
    else
631
20.4k
      r = collect_filter_prefix_update(tree, data);
632
28.4k
    isl_schedule_tree_free(tree);
633
28.4k
    if (r < 0)
634
0
      return -1;
635
28.4k
  }
636
8.08k
637
8.08k
  
return 08.06k
;
638
8.08k
}
639
640
/* Return the concatenation of the partial schedules of all outer band
641
 * nodes of "node" interesected with all outer filters
642
 * as an isl_multi_union_pw_aff.
643
 * None of the ancestors of "node" may be an extension node, unless
644
 * there is also a filter ancestor that filters out all the extended
645
 * domain elements.
646
 *
647
 * If "node" is pointing at the root of the schedule tree, then
648
 * there are no domain elements reaching the current node, so
649
 * we return an empty result.
650
 *
651
 * We collect all the filters and partial schedules in collect_filter_prefix
652
 * and intersect the domain of the combined schedule with the combined filter.
653
 */
654
__isl_give isl_multi_union_pw_aff *
655
isl_schedule_node_get_prefix_schedule_multi_union_pw_aff(
656
  __isl_keep isl_schedule_node *node)
657
3
{
658
3
  int n;
659
3
  isl_space *space;
660
3
  struct isl_schedule_node_get_filter_prefix_data data;
661
3
662
3
  if (!node)
663
0
    return NULL;
664
3
665
3
  space = isl_schedule_get_space(node->schedule);
666
3
  space = isl_space_set_from_params(space);
667
3
  if (node->tree == node->schedule->root)
668
0
    return isl_multi_union_pw_aff_zero(space);
669
3
670
3
  data.initialized = 0;
671
3
  data.universe_domain = 1;
672
3
  data.universe_filter = 0;
673
3
  data.collect_prefix = 1;
674
3
  data.filter = NULL;
675
3
  data.prefix = isl_multi_union_pw_aff_zero(space);
676
3
677
3
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
678
3
  if (collect_filter_prefix(node->ancestors, n, &data) < 0)
679
0
    data.prefix = isl_multi_union_pw_aff_free(data.prefix);
680
3
681
3
  data.prefix = isl_multi_union_pw_aff_intersect_domain(data.prefix,
682
3
                data.filter);
683
3
684
3
  return data.prefix;
685
3
}
686
687
/* Return the concatenation of the partial schedules of all outer band
688
 * nodes of "node" interesected with all outer filters
689
 * as an isl_union_pw_multi_aff.
690
 * None of the ancestors of "node" may be an extension node, unless
691
 * there is also a filter ancestor that filters out all the extended
692
 * domain elements.
693
 *
694
 * If "node" is pointing at the root of the schedule tree, then
695
 * there are no domain elements reaching the current node, so
696
 * we return an empty result.
697
 *
698
 * We collect all the filters and partial schedules in collect_filter_prefix.
699
 * The partial schedules are collected as an isl_multi_union_pw_aff.
700
 * If this isl_multi_union_pw_aff is zero-dimensional, then it does not
701
 * contain any domain information, so we construct the isl_union_pw_multi_aff
702
 * result as a zero-dimensional function on the collected filter.
703
 * Otherwise, we convert the isl_multi_union_pw_aff to
704
 * an isl_multi_union_pw_aff and intersect the domain with the filter.
705
 */
706
__isl_give isl_union_pw_multi_aff *
707
isl_schedule_node_get_prefix_schedule_union_pw_multi_aff(
708
  __isl_keep isl_schedule_node *node)
709
16
{
710
16
  int n;
711
16
  isl_space *space;
712
16
  isl_union_pw_multi_aff *prefix;
713
16
  struct isl_schedule_node_get_filter_prefix_data data;
714
16
715
16
  if (!node)
716
0
    return NULL;
717
16
718
16
  space = isl_schedule_get_space(node->schedule);
719
16
  if (node->tree == node->schedule->root)
720
0
    return isl_union_pw_multi_aff_empty(space);
721
16
722
16
  space = isl_space_set_from_params(space);
723
16
  data.initialized = 0;
724
16
  data.universe_domain = 1;
725
16
  data.universe_filter = 0;
726
16
  data.collect_prefix = 1;
727
16
  data.filter = NULL;
728
16
  data.prefix = isl_multi_union_pw_aff_zero(space);
729
16
730
16
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
731
16
  if (collect_filter_prefix(node->ancestors, n, &data) < 0)
732
0
    data.prefix = isl_multi_union_pw_aff_free(data.prefix);
733
16
734
16
  if (data.prefix &&
735
16
      isl_multi_union_pw_aff_dim(data.prefix, isl_dim_set) == 0) {
736
0
    isl_multi_union_pw_aff_free(data.prefix);
737
0
    prefix = isl_union_pw_multi_aff_from_domain(data.filter);
738
16
  } else {
739
16
    prefix =
740
16
        isl_union_pw_multi_aff_from_multi_union_pw_aff(data.prefix);
741
16
    prefix = isl_union_pw_multi_aff_intersect_domain(prefix,
742
16
                data.filter);
743
16
  }
744
16
745
16
  return prefix;
746
16
}
747
748
/* Return the concatenation of the partial schedules of all outer band
749
 * nodes of "node" interesected with all outer filters
750
 * as an isl_union_map.
751
 */
752
__isl_give isl_union_map *isl_schedule_node_get_prefix_schedule_union_map(
753
  __isl_keep isl_schedule_node *node)
754
14
{
755
14
  isl_union_pw_multi_aff *upma;
756
14
757
14
  upma = isl_schedule_node_get_prefix_schedule_union_pw_multi_aff(node);
758
14
  return isl_union_map_from_union_pw_multi_aff(upma);
759
14
}
760
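A usage sketch for the prefix-schedule queries above, assuming "node" points somewhere below the root; the helper name is invented and isl_union_map_dump is the standard isl debug printer.

#include <isl/ctx.h>
#include <isl/union_map.h>
#include <isl/schedule_node.h>

/* Sketch: dump the prefix schedule (outer bands intersected with
 * outer filters) of a given node.  The node itself is not modified.
 */
static isl_stat dump_prefix(__isl_keep isl_schedule_node *node)
{
  isl_union_map *prefix;

  prefix = isl_schedule_node_get_prefix_schedule_union_map(node);
  if (!prefix)
    return isl_stat_error;
  isl_union_map_dump(prefix);
  isl_union_map_free(prefix);
  return isl_stat_ok;
}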
761
/* Return the concatenation of the partial schedules of all outer band
762
 * nodes of "node" intersected with all outer domain constraints.
763
 * None of the ancestors of "node" may be an extension node, unless
764
 * there is also a filter ancestor that filters out all the extended
765
 * domain elements.
766
 *
767
 * Essentially, this function intersects the domain of the output
768
 * of isl_schedule_node_get_prefix_schedule_union_map with the output
769
 * of isl_schedule_node_get_domain, except that it only traverses
770
 * the ancestors of "node" once.
771
 */
772
__isl_give isl_union_map *isl_schedule_node_get_prefix_schedule_relation(
773
  __isl_keep isl_schedule_node *node)
774
3.98k
{
775
3.98k
  int n;
776
3.98k
  isl_space *space;
777
3.98k
  isl_union_map *prefix;
778
3.98k
  struct isl_schedule_node_get_filter_prefix_data data;
779
3.98k
780
3.98k
  if (!node)
781
0
    return NULL;
782
3.98k
783
3.98k
  space = isl_schedule_get_space(node->schedule);
784
3.98k
  if (node->tree == node->schedule->root)
785
0
    return isl_union_map_empty(space);
786
3.98k
787
3.98k
  space = isl_space_set_from_params(space);
788
3.98k
  data.initialized = 0;
789
3.98k
  data.universe_domain = 0;
790
3.98k
  data.universe_filter = 0;
791
3.98k
  data.collect_prefix = 1;
792
3.98k
  data.filter = NULL;
793
3.98k
  data.prefix = isl_multi_union_pw_aff_zero(space);
794
3.98k
795
3.98k
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
796
3.98k
  if (collect_filter_prefix(node->ancestors, n, &data) < 0)
797
0
    data.prefix = isl_multi_union_pw_aff_free(data.prefix);
798
3.98k
799
3.98k
  if (data.prefix &&
800
3.98k
      isl_multi_union_pw_aff_dim(data.prefix, isl_dim_set) == 0) {
801
420
    isl_multi_union_pw_aff_free(data.prefix);
802
420
    prefix = isl_union_map_from_domain(data.filter);
803
3.56k
  } else {
804
3.56k
    prefix = isl_union_map_from_multi_union_pw_aff(data.prefix);
805
3.56k
    prefix = isl_union_map_intersect_domain(prefix, data.filter);
806
3.56k
  }
807
3.98k
808
3.98k
  return prefix;
809
3.98k
}
810
811
/* Return the domain elements that reach "node".
812
 *
813
 * If "node" is pointing at the root of the schedule tree, then
814
 * there are no domain elements reaching the current node, so
815
 * we return an empty result.
816
 * None of the ancestors of "node" may be an extension node, unless
817
 * there is also a filter ancestor that filters out all the extended
818
 * domain elements.
819
 *
820
 * Otherwise, we collect all filters reaching the node,
821
 * intersected with the root domain in collect_filter_prefix.
822
 */
823
__isl_give isl_union_set *isl_schedule_node_get_domain(
824
  __isl_keep isl_schedule_node *node)
825
32
{
826
32
  int n;
827
32
  struct isl_schedule_node_get_filter_prefix_data data;
828
32
829
32
  if (!node)
830
0
    return NULL;
831
32
832
32
  if (node->tree == node->schedule->root) {
833
0
    isl_space *space;
834
0
835
0
    space = isl_schedule_get_space(node->schedule);
836
0
    return isl_union_set_empty(space);
837
0
  }
838
32
839
32
  data.initialized = 0;
840
32
  data.universe_domain = 0;
841
32
  data.universe_filter = 0;
842
32
  data.collect_prefix = 0;
843
32
  data.filter = NULL;
844
32
  data.prefix = NULL;
845
32
846
32
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
847
32
  if (collect_filter_prefix(node->ancestors, n, &data) < 0)
848
0
    data.filter = isl_union_set_free(data.filter);
849
32
850
32
  return data.filter;
851
32
}
852
853
/* Return the union of universe sets of the domain elements that reach "node".
854
 *
855
 * If "node" is pointing at the root of the schedule tree, then
856
 * there are no domain elements reaching the current node, so
857
 * we return an empty result.
858
 *
859
 * Otherwise, we collect the universes of all filters reaching the node
860
 * in collect_filter_prefix.
861
 */
862
__isl_give isl_union_set *isl_schedule_node_get_universe_domain(
863
  __isl_keep isl_schedule_node *node)
864
4.02k
{
865
4.02k
  int n;
866
4.02k
  struct isl_schedule_node_get_filter_prefix_data data;
867
4.02k
868
4.02k
  if (!node)
869
0
    return NULL;
870
4.02k
871
4.02k
  if (node->tree == node->schedule->root) {
872
0
    isl_space *space;
873
0
874
0
    space = isl_schedule_get_space(node->schedule);
875
0
    return isl_union_set_empty(space);
876
0
  }
877
4.02k
878
4.02k
  data.initialized = 0;
879
4.02k
  data.universe_domain = 1;
880
4.02k
  data.universe_filter = 1;
881
4.02k
  data.collect_prefix = 0;
882
4.02k
  data.filter = NULL;
883
4.02k
  data.prefix = NULL;
884
4.02k
885
4.02k
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
886
4.02k
  if (collect_filter_prefix(node->ancestors, n, &data) < 0)
887
0
    data.filter = isl_union_set_free(data.filter);
888
4.02k
889
4.02k
  return data.filter;
890
4.02k
}
891
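To illustrate the difference between the two domain queries above, both can be requested for the same node; the helper name is invented and isl_union_set_dump is the standard isl debug printer.

#include <isl/ctx.h>
#include <isl/union_set.h>
#include <isl/schedule_node.h>

/* Sketch: print both the exact domain reaching "node" and the union
 * of universe sets of that domain.
 */
static void dump_domains(__isl_keep isl_schedule_node *node)
{
  isl_union_set *domain, *universe;

  domain = isl_schedule_node_get_domain(node);
  universe = isl_schedule_node_get_universe_domain(node);
  isl_union_set_dump(domain);   /* outer filters and root domain applied */
  isl_union_set_dump(universe); /* only the universes of those sets */
  isl_union_set_free(domain);
  isl_union_set_free(universe);
}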
892
/* Return the subtree schedule of "node".
893
 *
894
 * Since isl_schedule_tree_get_subtree_schedule_union_map does not handle
895
 * trees that do not contain any schedule information, we first
896
 * move down to the first relevant descendant and handle leaves ourselves.
897
 *
898
 * If the subtree rooted at "node" contains any expansion nodes, then
899
 * the returned subtree schedule is formulated in terms of the expanded
900
 * domains.
901
 * The subtree is not allowed to contain any extension nodes.
902
 */
903
__isl_give isl_union_map *isl_schedule_node_get_subtree_schedule_union_map(
904
  __isl_keep isl_schedule_node *node)
905
1.51k
{
906
1.51k
  isl_schedule_tree *tree, *leaf;
907
1.51k
  isl_union_map *umap;
908
1.51k
909
1.51k
  tree = isl_schedule_node_get_tree(node);
910
1.51k
  leaf = isl_schedule_node_peek_leaf(node);
911
1.51k
  tree = isl_schedule_tree_first_schedule_descendant(tree, leaf);
912
1.51k
  if (!tree)
913
0
    return NULL;
914
1.51k
  if (tree == leaf) {
915
25
    isl_union_set *domain;
916
25
    domain = isl_schedule_node_get_universe_domain(node);
917
25
    isl_schedule_tree_free(tree);
918
25
    return isl_union_map_from_domain(domain);
919
25
  }
920
1.49k
921
1.49k
  umap = isl_schedule_tree_get_subtree_schedule_union_map(tree);
922
1.49k
  isl_schedule_tree_free(tree);
923
1.49k
  return umap;
924
1.49k
}
925
926
/* Return the number of ancestors of "node" in its schedule tree.
927
 */
928
int isl_schedule_node_get_tree_depth(__isl_keep isl_schedule_node *node)
929
472k
{
930
472k
  if (!node)
931
0
    return -1;
932
472k
  return isl_schedule_tree_list_n_schedule_tree(node->ancestors);
933
472k
}
934
935
/* Does "node" have a parent?
936
 *
937
 * That is, does it point to any node of the schedule other than the root?
938
 */
939
isl_bool isl_schedule_node_has_parent(__isl_keep isl_schedule_node *node)
940
91.0k
{
941
91.0k
  if (!node)
942
0
    return isl_bool_error;
943
91.0k
  if (!node->ancestors)
944
0
    return isl_bool_error;
945
91.0k
946
91.0k
  return isl_schedule_tree_list_n_schedule_tree(node->ancestors) != 0;
947
91.0k
}
948
949
/* Return the position of "node" among the children of its parent.
950
 */
951
int isl_schedule_node_get_child_position(__isl_keep isl_schedule_node *node)
952
0
{
953
0
  int n;
954
0
  int has_parent;
955
0
956
0
  if (!node)
957
0
    return -1;
958
0
  has_parent = isl_schedule_node_has_parent(node);
959
0
  if (has_parent < 0)
960
0
    return -1;
961
0
  if (!has_parent)
962
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
963
0
      "node has no parent", return -1);
964
0
965
0
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
966
0
  return node->child_pos[n - 1];
967
0
}
968
969
/* Does the parent (if any) of "node" have any children with a smaller child
970
 * position than this one?
971
 */
972
isl_bool isl_schedule_node_has_previous_sibling(
973
  __isl_keep isl_schedule_node *node)
974
0
{
975
0
  int n;
976
0
  isl_bool has_parent;
977
0
978
0
  if (!node)
979
0
    return isl_bool_error;
980
0
  has_parent = isl_schedule_node_has_parent(node);
981
0
  if (has_parent < 0 || !has_parent)
982
0
    return has_parent;
983
0
984
0
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
985
0
986
0
  return node->child_pos[n - 1] > 0;
987
0
}
988
989
/* Does the parent (if any) of "node" have any children with a greater child
990
 * position than this one?
991
 */
992
isl_bool isl_schedule_node_has_next_sibling(__isl_keep isl_schedule_node *node)
993
50.3k
{
994
50.3k
  int n, n_child;
995
50.3k
  isl_bool has_parent;
996
50.3k
  isl_schedule_tree *tree;
997
50.3k
998
50.3k
  if (!node)
999
0
    return isl_bool_error;
1000
50.3k
  has_parent = isl_schedule_node_has_parent(node);
1001
50.3k
  if (has_parent < 0 || !has_parent)
1002
0
    return has_parent;
1003
50.3k
1004
50.3k
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
1005
50.3k
  tree = isl_schedule_tree_list_get_schedule_tree(node->ancestors, n - 1);
1006
50.3k
  if (!tree)
1007
0
    return isl_bool_error;
1008
50.3k
  n_child = isl_schedule_tree_list_n_schedule_tree(tree->children);
1009
50.3k
  isl_schedule_tree_free(tree);
1010
50.3k
1011
50.3k
  return node->child_pos[n - 1] + 1 < n_child;
1012
50.3k
}
1013
1014
/* Does "node" have any children?
1015
 *
1016
 * Any node other than the leaf nodes is considered to have at least
1017
 * one child, even if the corresponding isl_schedule_tree does not
1018
 * have any children.
1019
 */
1020
isl_bool isl_schedule_node_has_children(__isl_keep isl_schedule_node *node)
1021
82.3k
{
1022
82.3k
  if (!node)
1023
0
    return isl_bool_error;
1024
82.3k
  return !isl_schedule_tree_is_leaf(node->tree);
1025
82.3k
}
1026
1027
/* Return the number of children of "node".
1028
 *
1029
 * Any node other than the leaf nodes is considered to have at least
1030
 * one child, even if the corresponding isl_schedule_tree does not
1031
 * have any children.  That is, the number of children of "node" is
1032
 * only zero if its tree is the explicit empty tree.  Otherwise,
1033
 * if the isl_schedule_tree has any children, then it is equal
1034
 * to the number of children of "node".  If it has zero children,
1035
 * then "node" still has a leaf node as child.
1036
 */
1037
int isl_schedule_node_n_children(__isl_keep isl_schedule_node *node)
1038
816
{
1039
816
  int n;
1040
816
1041
816
  if (!node)
1042
0
    return -1;
1043
816
1044
816
  if (isl_schedule_tree_is_leaf(node->tree))
1045
0
    return 0;
1046
816
1047
816
  n = isl_schedule_tree_n_children(node->tree);
1048
816
  if (n == 0)
1049
212
    return 1;
1050
604
1051
604
  return n;
1052
604
}
1053
1054
/* Move the "node" pointer to the ancestor of the given generation
1055
 * of the node it currently points to, where generation 0 is the node
1056
 * itself and generation 1 is its parent.
1057
 */
1058
__isl_give isl_schedule_node *isl_schedule_node_ancestor(
1059
  __isl_take isl_schedule_node *node, int generation)
1060
66.8k
{
1061
66.8k
  int n;
1062
66.8k
  isl_schedule_tree *tree;
1063
66.8k
1064
66.8k
  if (!node)
1065
0
    return NULL;
1066
66.8k
  if (generation == 0)
1067
0
    return node;
1068
66.8k
  n = isl_schedule_node_get_tree_depth(node);
1069
66.8k
  if (n < 0)
1070
0
    return isl_schedule_node_free(node);
1071
66.8k
  if (generation < 0 || generation > n)
1072
66.8k
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
1073
66.8k
      "generation out of bounds",
1074
66.8k
      return isl_schedule_node_free(node));
1075
66.8k
  node = isl_schedule_node_cow(node);
1076
66.8k
  if (!node)
1077
0
    return NULL;
1078
66.8k
1079
66.8k
  tree = isl_schedule_tree_list_get_schedule_tree(node->ancestors,
1080
66.8k
              n - generation);
1081
66.8k
  isl_schedule_tree_free(node->tree);
1082
66.8k
  node->tree = tree;
1083
66.8k
  node->ancestors = isl_schedule_tree_list_drop(node->ancestors,
1084
66.8k
                n - generation, generation);
1085
66.8k
  if (!node->ancestors || !node->tree)
1086
0
    return isl_schedule_node_free(node);
1087
66.8k
1088
66.8k
  return node;
1089
66.8k
}
1090
1091
/* Move the "node" pointer to the parent of the node it currently points to.
1092
 */
1093
__isl_give isl_schedule_node *isl_schedule_node_parent(
1094
  __isl_take isl_schedule_node *node)
1095
34.7k
{
1096
34.7k
  if (!node)
1097
0
    return NULL;
1098
34.7k
  if (!isl_schedule_node_has_parent(node))
1099
34.7k
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
1100
34.7k
      "node has no parent",
1101
34.7k
      return isl_schedule_node_free(node));
1102
34.7k
  return isl_schedule_node_ancestor(node, 1);
1103
34.7k
}
1104
1105
/* Move the "node" pointer to the root of its schedule tree.
1106
 */
1107
__isl_give isl_schedule_node *isl_schedule_node_root(
1108
  __isl_take isl_schedule_node *node)
1109
1
{
1110
1
  int n;
1111
1
1112
1
  if (!node)
1113
0
    return NULL;
1114
1
  n = isl_schedule_node_get_tree_depth(node);
1115
1
  if (n < 0)
1116
0
    return isl_schedule_node_free(node);
1117
1
  return isl_schedule_node_ancestor(node, n);
1118
1
}
1119
1120
/* Move the "node" pointer to the child at position "pos" of the node
1121
 * it currently points to.
1122
 */
1123
__isl_give isl_schedule_node *isl_schedule_node_child(
1124
  __isl_take isl_schedule_node *node, int pos)
1125
42.0k
{
1126
42.0k
  int n;
1127
42.0k
  isl_ctx *ctx;
1128
42.0k
  isl_schedule_tree *tree;
1129
42.0k
  int *child_pos;
1130
42.0k
1131
42.0k
  node = isl_schedule_node_cow(node);
1132
42.0k
  if (!node)
1133
0
    return NULL;
1134
42.0k
  if (!isl_schedule_node_has_children(node))
1135
42.0k
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
1136
42.0k
      "node has no children",
1137
42.0k
      return isl_schedule_node_free(node));
1138
42.0k
1139
42.0k
  ctx = isl_schedule_node_get_ctx(node);
1140
42.0k
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
1141
42.0k
  child_pos = isl_realloc_array(ctx, node->child_pos, int, n + 1);
1142
42.0k
  if (!child_pos)
1143
0
    return isl_schedule_node_free(node);
1144
42.0k
  node->child_pos = child_pos;
1145
42.0k
  node->child_pos[n] = pos;
1146
42.0k
1147
42.0k
  node->ancestors = isl_schedule_tree_list_add(node->ancestors,
1148
42.0k
        isl_schedule_tree_copy(node->tree));
1149
42.0k
  tree = node->tree;
1150
42.0k
  if (isl_schedule_tree_has_children(tree))
1151
23.0k
    tree = isl_schedule_tree_get_child(tree, pos);
1152
18.9k
  else
1153
18.9k
    tree = isl_schedule_node_get_leaf(node);
1154
42.0k
  isl_schedule_tree_free(node->tree);
1155
42.0k
  node->tree = tree;
1156
42.0k
1157
42.0k
  if (!node->tree || !node->ancestors)
1158
0
    return isl_schedule_node_free(node);
1159
42.0k
1160
42.0k
  return node;
1161
42.0k
}
1162
1163
/* Move the "node" pointer to the first child of the node
1164
 * it currently points to.
1165
 */
1166
__isl_give isl_schedule_node *isl_schedule_node_first_child(
1167
  __isl_take isl_schedule_node *node)
1168
33.2k
{
1169
33.2k
  return isl_schedule_node_child(node, 0);
1170
33.2k
}
1171
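A navigation sketch built on the movement functions above: it steps down to the first child and back up, leaving the pointer at its original position; the helper name is invented.

#include <isl/ctx.h>
#include <isl/schedule_node.h>

/* Sketch: visit the first child of "node", if there is one,
 * and return to the original position.
 */
static __isl_give isl_schedule_node *visit_first_child(
  __isl_take isl_schedule_node *node)
{
  if (isl_schedule_node_has_children(node) != isl_bool_true)
    return node;
  node = isl_schedule_node_first_child(node);
  /* ... inspect the child, e.g., with isl_schedule_node_get_type() ... */
  node = isl_schedule_node_parent(node);
  return node;
}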
1172
/* Move the "node" pointer to the child of this node's parent in
1173
 * the previous child position.
1174
 */
1175
__isl_give isl_schedule_node *isl_schedule_node_previous_sibling(
1176
  __isl_take isl_schedule_node *node)
1177
0
{
1178
0
  int n;
1179
0
  isl_schedule_tree *parent, *tree;
1180
0
1181
0
  node = isl_schedule_node_cow(node);
1182
0
  if (!node)
1183
0
    return NULL;
1184
0
  if (!isl_schedule_node_has_previous_sibling(node))
1185
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
1186
0
      "node has no previous sibling",
1187
0
      return isl_schedule_node_free(node));
1188
0
1189
0
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
1190
0
  parent = isl_schedule_tree_list_get_schedule_tree(node->ancestors,
1191
0
                  n - 1);
1192
0
  if (!parent)
1193
0
    return isl_schedule_node_free(node);
1194
0
  node->child_pos[n - 1]--;
1195
0
  tree = isl_schedule_tree_list_get_schedule_tree(parent->children,
1196
0
              node->child_pos[n - 1]);
1197
0
  isl_schedule_tree_free(parent);
1198
0
  if (!tree)
1199
0
    return isl_schedule_node_free(node);
1200
0
  isl_schedule_tree_free(node->tree);
1201
0
  node->tree = tree;
1202
0
1203
0
  return node;
1204
0
}
1205
1206
/* Move the "node" pointer to the child of this node's parent in
1207
 * the next child position.
1208
 */
1209
__isl_give isl_schedule_node *isl_schedule_node_next_sibling(
1210
  __isl_take isl_schedule_node *node)
1211
8.45k
{
1212
8.45k
  int n;
1213
8.45k
  isl_schedule_tree *parent, *tree;
1214
8.45k
1215
8.45k
  node = isl_schedule_node_cow(node);
1216
8.45k
  if (!node)
1217
0
    return NULL;
1218
8.45k
  if (!isl_schedule_node_has_next_sibling(node))
1219
8.45k
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
1220
8.45k
      "node has no next sibling",
1221
8.45k
      return isl_schedule_node_free(node));
1222
8.45k
1223
8.45k
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
1224
8.45k
  parent = isl_schedule_tree_list_get_schedule_tree(node->ancestors,
1225
8.45k
                  n - 1);
1226
8.45k
  if (!parent)
1227
0
    return isl_schedule_node_free(node);
1228
8.45k
  node->child_pos[n - 1]++;
1229
8.45k
  tree = isl_schedule_tree_list_get_schedule_tree(parent->children,
1230
8.45k
              node->child_pos[n - 1]);
1231
8.45k
  isl_schedule_tree_free(parent);
1232
8.45k
  if (!tree)
1233
0
    return isl_schedule_node_free(node);
1234
8.45k
  isl_schedule_tree_free(node->tree);
1235
8.45k
  node->tree = tree;
1236
8.45k
1237
8.45k
  return node;
1238
8.45k
}
1239
1240
/* Return a copy of the child at position "pos" of "node".
1241
 */
1242
__isl_give isl_schedule_node *isl_schedule_node_get_child(
1243
  __isl_keep isl_schedule_node *node, int pos)
1244
602
{
1245
602
  return isl_schedule_node_child(isl_schedule_node_copy(node), pos);
1246
602
}
1247
1248
/* Traverse the descendant of "node" in depth-first order, including
1249
 * "node" itself.  Call "enter" whenever a node is entered and "leave"
1250
 * whenever a node is left.  The callback "enter" is responsible
1251
 * for moving to the deepest initial subtree of its argument that
1252
 * should be traversed.
1253
 */
1254
static __isl_give isl_schedule_node *traverse(
1255
  __isl_take isl_schedule_node *node,
1256
  __isl_give isl_schedule_node *(*enter)(
1257
    __isl_take isl_schedule_node *node, void *user),
1258
  __isl_give isl_schedule_node *(*leave)(
1259
    __isl_take isl_schedule_node *node, void *user),
1260
  void *user)
1261
7.06k
{
1262
7.06k
  int depth;
1263
7.06k
1264
7.06k
  if (!node)
1265
0
    return NULL;
1266
7.06k
1267
7.06k
  depth = isl_schedule_node_get_tree_depth(node);
1268
15.5k
  do {
1269
15.5k
    node = enter(node, user);
1270
15.5k
    node = leave(node, user);
1271
48.9k
    while (node && isl_schedule_node_get_tree_depth(node) > depth &&
1272
48.9k
        !isl_schedule_node_has_next_sibling(node)) {
1273
33.4k
      node = isl_schedule_node_parent(node);
1274
33.4k
      node = leave(node, user);
1275
33.4k
    }
1276
15.5k
    if (node && isl_schedule_node_get_tree_depth(node) > depth)
1277
8.45k
      node = isl_schedule_node_next_sibling(node);
1278
15.5k
  } while (node && isl_schedule_node_get_tree_depth(node) > depth);
1279
7.06k
1280
7.06k
  return node;
1281
7.06k
}
1282
1283
/* Internal data structure for isl_schedule_node_foreach_descendant_top_down.
1284
 *
1285
 * "fn" is the user-specified callback function.
1286
 * "user" is the user-specified argument for the callback.
1287
 */
1288
struct isl_schedule_node_preorder_data {
1289
  isl_bool (*fn)(__isl_keep isl_schedule_node *node, void *user);
1290
  void *user;
1291
};
1292
1293
/* Callback for "traverse" to enter a node and to move
1294
 * to the deepest initial subtree that should be traversed
1295
 * for use in a preorder visit.
1296
 *
1297
 * If the user callback returns a negative value, then we abort
1298
 * the traversal.  If this callback returns zero, then we skip
1299
 * the subtree rooted at the current node.  Otherwise, we move
1300
 * down to the first child and repeat the process until a leaf
1301
 * is reached.
1302
 */
1303
static __isl_give isl_schedule_node *preorder_enter(
1304
  __isl_take isl_schedule_node *node, void *user)
1305
13.8k
{
1306
13.8k
  struct isl_schedule_node_preorder_data *data = user;
1307
13.8k
1308
13.8k
  if (!node)
1309
0
    return NULL;
1310
13.8k
1311
44.8k
  do {
1312
44.8k
    isl_bool r;
1313
44.8k
1314
44.8k
    r = data->fn(node, data->user);
1315
44.8k
    if (r < 0)
1316
16
      return isl_schedule_node_free(node);
1317
44.8k
    if (r == isl_bool_false)
1318
7.91k
      return node;
1319
36.8k
  } while (isl_schedule_node_has_children(node) &&
1320
36.8k
    
(node = isl_schedule_node_first_child(node)) != NULL30.9k
);
1321
13.8k
1322
13.8k
  
return node5.90k
;
1323
13.8k
}
1324
1325
/* Callback for "traverse" to leave a node
1326
 * for use in a preorder visit.
1327
 * Since we already visited the node when we entered it,
1328
 * we do not need to do anything here.
1329
 */
1330
static __isl_give isl_schedule_node *preorder_leave(
1331
  __isl_take isl_schedule_node *node, void *user)
1332
44.7k
{
1333
44.7k
  return node;
1334
44.7k
}
1335
1336
/* Traverse the descendants of "node" (including the node itself)
1337
 * in depth first preorder.
1338
 *
1339
 * If "fn" returns isl_bool_error on any of the nodes,
1340
 * then the traversal is aborted.
1341
 * If "fn" returns isl_bool_false on any of the nodes, then the subtree rooted
1342
 * at that node is skipped.
1343
 *
1344
 * Return isl_stat_ok on success and isl_stat_error on failure.
1345
 */
1346
isl_stat isl_schedule_node_foreach_descendant_top_down(
1347
  __isl_keep isl_schedule_node *node,
1348
  isl_bool (*fn)(__isl_keep isl_schedule_node *node, void *user),
1349
  void *user)
1350
6.46k
{
1351
6.46k
  struct isl_schedule_node_preorder_data data = { fn, user };
1352
6.46k
1353
6.46k
  node = isl_schedule_node_copy(node);
1354
6.46k
  node = traverse(node, &preorder_enter, &preorder_leave, &data);
1355
6.46k
  isl_schedule_node_free(node);
1356
6.46k
1357
6.46k
  return node ? isl_stat_ok : isl_stat_error;
1358
6.46k
}
1359
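A sketch of how the traversal above is typically used: the callback counts band nodes and neither prunes nor aborts; all names are invented.

#include <isl/ctx.h>
#include <isl/schedule_node.h>

/* Callback: count band nodes and keep descending everywhere. */
static isl_bool count_bands(__isl_keep isl_schedule_node *node, void *user)
{
  int *n_band = user;

  if (isl_schedule_node_get_type(node) == isl_schedule_node_band)
    (*n_band)++;
  return isl_bool_true;
}

/* Sketch: number of band nodes in the subtree rooted at "node",
 * or -1 on error.
 */
static int n_band_nodes(__isl_keep isl_schedule_node *node)
{
  int n_band = 0;

  if (isl_schedule_node_foreach_descendant_top_down(node, &count_bands,
                &n_band) < 0)
    return -1;
  return n_band;
}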
1360
/* Internal data structure for isl_schedule_node_every_descendant.
1361
 *
1362
 * "test" is the user-specified callback function.
1363
 * "user" is the user-specified callback function argument.
1364
 *
1365
 * "failed" is initialized to 0 and set to 1 if "test" fails
1366
 * on any node.
1367
 */
1368
struct isl_union_map_every_data {
1369
  isl_bool (*test)(__isl_keep isl_schedule_node *node, void *user);
1370
  void *user;
1371
  int failed;
1372
};
1373
1374
/* isl_schedule_node_foreach_descendant_top_down callback
1375
 * that sets data->failed if data->test returns false and
1376
 * subsequently aborts the traversal.
1377
 */
1378
static isl_bool call_every(__isl_keep isl_schedule_node *node, void *user)
1379
0
{
1380
0
  struct isl_union_map_every_data *data = user;
1381
0
  isl_bool r;
1382
0
1383
0
  r = data->test(node, data->user);
1384
0
  if (r < 0)
1385
0
    return isl_bool_error;
1386
0
  if (r)
1387
0
    return isl_bool_true;
1388
0
  data->failed = 1;
1389
0
  return isl_bool_error;
1390
0
}
1391
1392
/* Does "test" succeed on every descendant of "node" (including "node" itself)?
1393
 */
1394
isl_bool isl_schedule_node_every_descendant(__isl_keep isl_schedule_node *node,
1395
  isl_bool (*test)(__isl_keep isl_schedule_node *node, void *user),
1396
  void *user)
1397
0
{
1398
0
  struct isl_union_map_every_data data = { test, user, 0 };
1399
0
  isl_stat r;
1400
0
1401
0
  r = isl_schedule_node_foreach_descendant_top_down(node, &call_every,
1402
0
              &data);
1403
0
  if (r >= 0)
1404
0
    return isl_bool_true;
1405
0
  if (data.failed)
1406
0
    return isl_bool_false;
1407
0
  return isl_bool_error;
1408
0
}
1409
1410
/* Internal data structure for isl_schedule_node_map_descendant_bottom_up.
1411
 *
1412
 * "fn" is the user-specified callback function.
1413
 * "user" is the user-specified argument for the callback.
1414
 */
1415
struct isl_schedule_node_postorder_data {
1416
  __isl_give isl_schedule_node *(*fn)(__isl_take isl_schedule_node *node,
1417
    void *user);
1418
  void *user;
1419
};
1420
1421
/* Callback for "traverse" to enter a node and to move
1422
 * to the deepest initial subtree that should be traversed
1423
 * for use in a postorder visit.
1424
 *
1425
 * Since we are performing a postorder visit, we only need
1426
 * to move to the deepest initial leaf here.
1427
 */
1428
static __isl_give isl_schedule_node *postorder_enter(
1429
  __isl_take isl_schedule_node *node, void *user)
1430
353
{
1431
1.27k
  while (node && isl_schedule_node_has_children(node))
1432
926
    node = isl_schedule_node_first_child(node);
1433
353
1434
353
  return node;
1435
353
}
1436
1437
/* Callback for "traverse" to leave a node
1438
 * for use in a postorder visit.
1439
 *
1440
 * Since we are performing a postorder visit, we need
1441
 * to call the user callback here.
1442
 */
1443
static __isl_give isl_schedule_node *postorder_leave(
1444
  __isl_take isl_schedule_node *node, void *user)
1445
1.60k
{
1446
1.60k
  struct isl_schedule_node_postorder_data *data = user;
1447
1.60k
1448
1.60k
  return data->fn(node, data->user);
1449
1.60k
}
1450
1451
/* Traverse the descendants of "node" (including the node itself)
1452
 * in depth first postorder, allowing the user to modify the visited node.
1453
 * The traversal continues from the node returned by the callback function.
1454
 * It is the responsibility of the user to ensure that this does not
1455
 * lead to an infinite loop.  It is safest to always return a pointer
1456
 * to the same position (same ancestors and child positions) as the input node.
1457
 */
1458
__isl_give isl_schedule_node *isl_schedule_node_map_descendant_bottom_up(
1459
  __isl_take isl_schedule_node *node,
1460
  __isl_give isl_schedule_node *(*fn)(__isl_take isl_schedule_node *node,
1461
    void *user), void *user)
1462
186
{
1463
186
  struct isl_schedule_node_postorder_data data = { fn, user };
1464
186
1465
186
  return traverse(node, &postorder_enter, &postorder_leave, &data);
1466
186
}
1467
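A sketch of the bottom-up rewriting interface above: the callback returns a node at the same position, as recommended, after clearing the coincident flags of any band it visits; all names are invented.

#include <isl/ctx.h>
#include <isl/schedule_node.h>

/* Callback: reset the coincident flag of every member of a band node;
 * all other nodes are returned unchanged.
 */
static __isl_give isl_schedule_node *clear_coincidence(
  __isl_take isl_schedule_node *node, void *user)
{
  int i, n;

  if (isl_schedule_node_get_type(node) != isl_schedule_node_band)
    return node;
  n = isl_schedule_node_band_n_member(node);
  for (i = 0; i < n; ++i)
    node = isl_schedule_node_band_member_set_coincident(node, i, 0);
  return node;
}

/* Sketch: apply the callback to every node in the subtree of "node". */
static __isl_give isl_schedule_node *clear_all_coincidence(
  __isl_take isl_schedule_node *node)
{
  return isl_schedule_node_map_descendant_bottom_up(node,
              &clear_coincidence, NULL);
}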
1468
/* Traverse the ancestors of "node" from the root down to and including
1469
 * the parent of "node", calling "fn" on each of them.
1470
 *
1471
 * If "fn" returns -1 on any of the nodes, then the traversal is aborted.
1472
 *
1473
 * Return 0 on success and -1 on failure.
1474
 */
1475
isl_stat isl_schedule_node_foreach_ancestor_top_down(
1476
  __isl_keep isl_schedule_node *node,
1477
  isl_stat (*fn)(__isl_keep isl_schedule_node *node, void *user),
1478
  void *user)
1479
0
{
1480
0
  int i, n;
1481
0
1482
0
  if (!node)
1483
0
    return isl_stat_error;
1484
0
1485
0
  n = isl_schedule_node_get_tree_depth(node);
1486
0
  for (i = 0; i < n; ++i) {
1487
0
    isl_schedule_node *ancestor;
1488
0
    isl_stat r;
1489
0
1490
0
    ancestor = isl_schedule_node_copy(node);
1491
0
    ancestor = isl_schedule_node_ancestor(ancestor, n - i);
1492
0
    r = fn(ancestor, user);
1493
0
    isl_schedule_node_free(ancestor);
1494
0
    if (r < 0)
1495
0
      return isl_stat_error;
1496
0
  }
1497
0
1498
0
  return isl_stat_ok;
1499
0
}
1500
1501
/* Is any node in the subtree rooted at "node" anchored?
1502
 * That is, do any of these nodes reference the outer band nodes?
1503
 */
1504
isl_bool isl_schedule_node_is_subtree_anchored(
1505
  __isl_keep isl_schedule_node *node)
1506
3.96k
{
1507
3.96k
  if (!node)
1508
0
    return isl_bool_error;
1509
3.96k
  return isl_schedule_tree_is_subtree_anchored(node->tree);
1510
3.96k
}
1511
1512
/* Return the number of members in the given band node.
1513
 */
1514
unsigned isl_schedule_node_band_n_member(__isl_keep isl_schedule_node *node)
1515
1.62k
{
1516
1.62k
  return node ? isl_schedule_tree_band_n_member(node->tree) : 
00
;
1517
1.62k
}
1518
1519
/* Is the band member at position "pos" of the band node "node"
1520
 * marked coincident?
1521
 */
1522
isl_bool isl_schedule_node_band_member_get_coincident(
1523
  __isl_keep isl_schedule_node *node, int pos)
1524
813
{
1525
813
  if (!node)
1526
0
    return isl_bool_error;
1527
813
  return isl_schedule_tree_band_member_get_coincident(node->tree, pos);
1528
813
}
1529
1530
/* Mark the band member at position "pos" of the band node "node"
1531
 * as being coincident or not according to "coincident".
1532
 */
1533
__isl_give isl_schedule_node *isl_schedule_node_band_member_set_coincident(
1534
  __isl_take isl_schedule_node *node, int pos, int coincident)
1535
279
{
1536
279
  int c;
1537
279
  isl_schedule_tree *tree;
1538
279
1539
279
  if (!node)
1540
0
    return NULL;
1541
279
  c = isl_schedule_node_band_member_get_coincident(node, pos);
1542
279
  if (c == coincident)
1543
92
    return node;
1544
187
1545
187
  tree = isl_schedule_tree_copy(node->tree);
1546
187
  tree = isl_schedule_tree_band_member_set_coincident(tree, pos,
1547
187
                  coincident);
1548
187
  node = isl_schedule_node_graft_tree(node, tree);
1549
187
1550
187
  return node;
1551
187
}
1552
1553
/* Is the band node "node" marked permutable?
1554
 */
1555
isl_bool isl_schedule_node_band_get_permutable(
1556
  __isl_keep isl_schedule_node *node)
1557
656
{
1558
656
  if (!node)
1559
0
    return isl_bool_error;
1560
656
1561
656
  return isl_schedule_tree_band_get_permutable(node->tree);
1562
656
}
1563
1564
/* Mark the band node "node" permutable or not according to "permutable".
1565
 */
1566
__isl_give isl_schedule_node *isl_schedule_node_band_set_permutable(
1567
  __isl_take isl_schedule_node *node, int permutable)
1568
172
{
1569
172
  isl_schedule_tree *tree;
1570
172
1571
172
  if (!node)
1572
0
    return NULL;
1573
172
  if (isl_schedule_node_band_get_permutable(node) == permutable)
1574
30
    return node;
1575
142
1576
142
  tree = isl_schedule_tree_copy(node->tree);
1577
142
  tree = isl_schedule_tree_band_set_permutable(tree, permutable);
1578
142
  node = isl_schedule_node_graft_tree(node, tree);
1579
142
1580
142
  return node;
1581
142
}
1582
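Combining the coincidence and permutability setters above, a pass might mark a fully parallel band as follows (illustrative sketch; the helper name is hypothetical and it assumes "node" points at a band node):

/* Hypothetical helper: mark every member of the band node "node"
 * coincident and the band itself permutable.
 */
static __isl_give isl_schedule_node *mark_band_parallel(
	__isl_take isl_schedule_node *node)
{
	int i, n;

	n = isl_schedule_node_band_n_member(node);
	for (i = 0; i < n; ++i)
		node = isl_schedule_node_band_member_set_coincident(node,
									i, 1);
	return isl_schedule_node_band_set_permutable(node, 1);
}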
1583
/* Return the schedule space of the band node.
1584
 */
1585
__isl_give isl_space *isl_schedule_node_band_get_space(
1586
  __isl_keep isl_schedule_node *node)
1587
308
{
1588
308
  if (!node)
1589
0
    return NULL;
1590
308
1591
308
  return isl_schedule_tree_band_get_space(node->tree);
1592
308
}
1593
1594
/* Return the schedule of the band node in isolation.
1595
 */
1596
__isl_give isl_multi_union_pw_aff *isl_schedule_node_band_get_partial_schedule(
1597
  __isl_keep isl_schedule_node *node)
1598
716
{
1599
716
  if (!node)
1600
0
    return NULL;
1601
716
1602
716
  return isl_schedule_tree_band_get_partial_schedule(node->tree);
1603
716
}
1604
1605
/* Return the schedule of the band node in isolation in the form of
1606
 * an isl_union_map.
1607
 *
1608
 * If the band does not have any members, then we construct a universe map
1609
 * with the universe of the domain elements reaching the node as domain.
1610
 * Otherwise, we extract an isl_multi_union_pw_aff representation and
1611
 * convert that to an isl_union_map.
1612
 */
1613
__isl_give isl_union_map *isl_schedule_node_band_get_partial_schedule_union_map(
1614
  __isl_keep isl_schedule_node *node)
1615
36
{
1616
36
  isl_multi_union_pw_aff *mupa;
1617
36
1618
36
  if (!node)
1619
0
    return NULL;
1620
36
1621
36
  if (isl_schedule_node_get_type(node) != isl_schedule_node_band)
1622
36
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
1623
36
      "not a band node", return NULL);
1624
36
  if (isl_schedule_node_band_n_member(node) == 0) {
1625
0
    isl_union_set *domain;
1626
0
1627
0
    domain = isl_schedule_node_get_universe_domain(node);
1628
0
    return isl_union_map_from_domain(domain);
1629
0
  }
1630
36
1631
36
  mupa = isl_schedule_node_band_get_partial_schedule(node);
1632
36
  return isl_union_map_from_multi_union_pw_aff(mupa);
1633
36
}
1634
1635
/* Return the loop AST generation type for the band member of band node "node"
1636
 * at position "pos".
1637
 */
1638
enum isl_ast_loop_type isl_schedule_node_band_member_get_ast_loop_type(
1639
  __isl_keep isl_schedule_node *node, int pos)
1640
791
{
1641
791
  if (!node)
1642
0
    return isl_ast_loop_error;
1643
791
1644
791
  return isl_schedule_tree_band_member_get_ast_loop_type(node->tree, pos);
1645
791
}
1646
1647
/* Set the loop AST generation type for the band member of band node "node"
1648
 * at position "pos" to "type".
1649
 */
1650
__isl_give isl_schedule_node *isl_schedule_node_band_member_set_ast_loop_type(
1651
  __isl_take isl_schedule_node *node, int pos,
1652
  enum isl_ast_loop_type type)
1653
0
{
1654
0
  isl_schedule_tree *tree;
1655
0
1656
0
  if (!node)
1657
0
    return NULL;
1658
0
1659
0
  tree = isl_schedule_tree_copy(node->tree);
1660
0
  tree = isl_schedule_tree_band_member_set_ast_loop_type(tree, pos, type);
1661
0
  return isl_schedule_node_graft_tree(node, tree);
1662
0
}
1663
1664
/* Return the loop AST generation type for the band member of band node "node"
1665
 * at position "pos" for the isolated part.
1666
 */
1667
enum isl_ast_loop_type isl_schedule_node_band_member_get_isolate_ast_loop_type(
1668
  __isl_keep isl_schedule_node *node, int pos)
1669
913
{
1670
913
  if (!node)
1671
0
    return isl_ast_loop_error;
1672
913
1673
913
  return isl_schedule_tree_band_member_get_isolate_ast_loop_type(
1674
913
                  node->tree, pos);
1675
913
}
1676
1677
/* Set the loop AST generation type for the band member of band node "node"
1678
 * at position "pos" for the isolated part to "type".
1679
 */
1680
__isl_give isl_schedule_node *
1681
isl_schedule_node_band_member_set_isolate_ast_loop_type(
1682
  __isl_take isl_schedule_node *node, int pos,
1683
  enum isl_ast_loop_type type)
1684
0
{
1685
0
  isl_schedule_tree *tree;
1686
0
1687
0
  if (!node)
1688
0
    return NULL;
1689
0
1690
0
  tree = isl_schedule_tree_copy(node->tree);
1691
0
  tree = isl_schedule_tree_band_member_set_isolate_ast_loop_type(tree,
1692
0
                    pos, type);
1693
0
  return isl_schedule_node_graft_tree(node, tree);
1694
0
}
1695
1696
/* Return the AST build options associated to band node "node".
1697
 */
1698
__isl_give isl_union_set *isl_schedule_node_band_get_ast_build_options(
1699
  __isl_keep isl_schedule_node *node)
1700
0
{
1701
0
  if (!node)
1702
0
    return NULL;
1703
0
1704
0
  return isl_schedule_tree_band_get_ast_build_options(node->tree);
1705
0
}
1706
1707
/* Replace the AST build options associated to band node "node" by "options".
1708
 */
1709
__isl_give isl_schedule_node *isl_schedule_node_band_set_ast_build_options(
1710
  __isl_take isl_schedule_node *node, __isl_take isl_union_set *options)
1711
72
{
1712
72
  isl_schedule_tree *tree;
1713
72
1714
72
  if (!node || !options)
1715
0
    goto error;
1716
72
1717
72
  tree = isl_schedule_tree_copy(node->tree);
1718
72
  tree = isl_schedule_tree_band_set_ast_build_options(tree, options);
1719
72
  return isl_schedule_node_graft_tree(node, tree);
1720
0
error:
1721
0
  isl_schedule_node_free(node);
1722
0
  isl_union_set_free(options);
1723
0
  return NULL;
1724
72
}
1725
1726
/* Return the "isolate" option associated to band node "node".
1727
 */
1728
__isl_give isl_set *isl_schedule_node_band_get_ast_isolate_option(
1729
  __isl_keep isl_schedule_node *node)
1730
2.32k
{
1731
2.32k
  int depth;
1732
2.32k
1733
2.32k
  if (!node)
1734
0
    return NULL;
1735
2.32k
1736
2.32k
  depth = isl_schedule_node_get_schedule_depth(node);
1737
2.32k
  return isl_schedule_tree_band_get_ast_isolate_option(node->tree, depth);
1738
2.32k
}
1739
1740
/* Make sure that the spaces of "node" and "mv" are the same.
1741
 * Return -1 on error, reporting the error to the user.
1742
 */
1743
static int check_space_multi_val(__isl_keep isl_schedule_node *node,
1744
  __isl_keep isl_multi_val *mv)
1745
78
{
1746
78
  isl_space *node_space, *mv_space;
1747
78
  int equal;
1748
78
1749
78
  node_space = isl_schedule_node_band_get_space(node);
1750
78
  mv_space = isl_multi_val_get_space(mv);
1751
78
  equal = isl_space_tuple_is_equal(node_space, isl_dim_set,
1752
78
          mv_space, isl_dim_set);
1753
78
  isl_space_free(mv_space);
1754
78
  isl_space_free(node_space);
1755
78
  if (equal < 0)
1756
0
    return -1;
1757
78
  if (!equal)
1758
78
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
1759
78
      "spaces don't match", return -1);
1760
78
1761
78
  return 0;
1762
78
}
1763
1764
/* Multiply the partial schedule of the band node "node"
1765
 * with the factors in "mv".
1766
 */
1767
__isl_give isl_schedule_node *isl_schedule_node_band_scale(
1768
  __isl_take isl_schedule_node *node, __isl_take isl_multi_val *mv)
1769
0
{
1770
0
  isl_schedule_tree *tree;
1771
0
  int anchored;
1772
0
1773
0
  if (!node || !mv)
1774
0
    goto error;
1775
0
  if (check_space_multi_val(node, mv) < 0)
1776
0
    goto error;
1777
0
  anchored = isl_schedule_node_is_subtree_anchored(node);
1778
0
  if (anchored < 0)
1779
0
    goto error;
1780
0
  if (anchored)
1781
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
1782
0
      "cannot scale band node with anchored subtree",
1783
0
      goto error);
1784
0
1785
0
  tree = isl_schedule_node_get_tree(node);
1786
0
  tree = isl_schedule_tree_band_scale(tree, mv);
1787
0
  return isl_schedule_node_graft_tree(node, tree);
1788
0
error:
1789
0
  isl_multi_val_free(mv);
1790
0
  isl_schedule_node_free(node);
1791
0
  return NULL;
1792
0
}
1793
1794
/* Divide the partial schedule of the band node "node"
1795
 * by the factors in "mv".
1796
 */
1797
__isl_give isl_schedule_node *isl_schedule_node_band_scale_down(
1798
  __isl_take isl_schedule_node *node, __isl_take isl_multi_val *mv)
1799
0
{
1800
0
  isl_schedule_tree *tree;
1801
0
  int anchored;
1802
0
1803
0
  if (!node || !mv)
1804
0
    goto error;
1805
0
  if (check_space_multi_val(node, mv) < 0)
1806
0
    goto error;
1807
0
  anchored = isl_schedule_node_is_subtree_anchored(node);
1808
0
  if (anchored < 0)
1809
0
    goto error;
1810
0
  if (anchored)
1811
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
1812
0
      "cannot scale down band node with anchored subtree",
1813
0
      goto error);
1814
0
1815
0
  tree = isl_schedule_node_get_tree(node);
1816
0
  tree = isl_schedule_tree_band_scale_down(tree, mv);
1817
0
  return isl_schedule_node_graft_tree(node, tree);
1818
0
error:
1819
0
  isl_multi_val_free(mv);
1820
0
  isl_schedule_node_free(node);
1821
0
  return NULL;
1822
0
}
1823
1824
/* Reduce the partial schedule of the band node "node"
1825
 * modulo the factors in "mv".
1826
 */
1827
__isl_give isl_schedule_node *isl_schedule_node_band_mod(
1828
  __isl_take isl_schedule_node *node, __isl_take isl_multi_val *mv)
1829
0
{
1830
0
  isl_schedule_tree *tree;
1831
0
  isl_bool anchored;
1832
0
1833
0
  if (!node || !mv)
1834
0
    goto error;
1835
0
  if (check_space_multi_val(node, mv) < 0)
1836
0
    goto error;
1837
0
  anchored = isl_schedule_node_is_subtree_anchored(node);
1838
0
  if (anchored < 0)
1839
0
    goto error;
1840
0
  if (anchored)
1841
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
1842
0
      "cannot perform mod on band node with anchored subtree",
1843
0
      goto error);
1844
0
1845
0
  tree = isl_schedule_node_get_tree(node);
1846
0
  tree = isl_schedule_tree_band_mod(tree, mv);
1847
0
  return isl_schedule_node_graft_tree(node, tree);
1848
0
error:
1849
0
  isl_multi_val_free(mv);
1850
0
  isl_schedule_node_free(node);
1851
0
  return NULL;
1852
0
}
1853
1854
/* Make sure that the spaces of "node" and "mupa" are the same.
1855
 * Return isl_stat_error on error, reporting the error to the user.
1856
 */
1857
static isl_stat check_space_multi_union_pw_aff(
1858
  __isl_keep isl_schedule_node *node,
1859
  __isl_keep isl_multi_union_pw_aff *mupa)
1860
0
{
1861
0
  isl_space *node_space, *mupa_space;
1862
0
  isl_bool equal;
1863
0
1864
0
  node_space = isl_schedule_node_band_get_space(node);
1865
0
  mupa_space = isl_multi_union_pw_aff_get_space(mupa);
1866
0
  equal = isl_space_tuple_is_equal(node_space, isl_dim_set,
1867
0
          mupa_space, isl_dim_set);
1868
0
  isl_space_free(mupa_space);
1869
0
  isl_space_free(node_space);
1870
0
  if (equal < 0)
1871
0
    return isl_stat_error;
1872
0
  if (!equal)
1873
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
1874
0
      "spaces don't match", return isl_stat_error);
1875
0
1876
0
  return isl_stat_ok;
1877
0
}
1878
1879
/* Shift the partial schedule of the band node "node" by "shift".
1880
 */
1881
__isl_give isl_schedule_node *isl_schedule_node_band_shift(
1882
  __isl_take isl_schedule_node *node,
1883
  __isl_take isl_multi_union_pw_aff *shift)
1884
0
{
1885
0
  isl_schedule_tree *tree;
1886
0
  int anchored;
1887
0
1888
0
  if (!node || !shift)
1889
0
    goto error;
1890
0
  if (check_space_multi_union_pw_aff(node, shift) < 0)
1891
0
    goto error;
1892
0
  anchored = isl_schedule_node_is_subtree_anchored(node);
1893
0
  if (anchored < 0)
1894
0
    goto error;
1895
0
  if (anchored)
1896
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
1897
0
      "cannot shift band node with anchored subtree",
1898
0
      goto error);
1899
0
1900
0
  tree = isl_schedule_node_get_tree(node);
1901
0
  tree = isl_schedule_tree_band_shift(tree, shift);
1902
0
  return isl_schedule_node_graft_tree(node, tree);
1903
0
error:
1904
0
  isl_multi_union_pw_aff_free(shift);
1905
0
  isl_schedule_node_free(node);
1906
0
  return NULL;
1907
0
}
1908
1909
/* Tile "node" with tile sizes "sizes".
1910
 *
1911
 * The current node is replaced by two nested nodes corresponding
1912
 * to the tile dimensions and the point dimensions.
1913
 *
1914
 * Return a pointer to the outer (tile) node.
1915
 *
1916
 * If any of the descendants of "node" depend on the set of outer band nodes,
1917
 * then we refuse to tile the node.
1918
 *
1919
 * If the scale tile loops option is set, then the tile loops
1920
 * are scaled by the tile sizes.  If the shift point loops option is set,
1921
 * then the point loops are shifted to start at zero.
1922
 * In particular, these options affect the tile and point loop schedules
1923
 * as follows
1924
 *
1925
 *  scale  shift  original  tile            point
1926
 *
1927
 *  0      0      i         floor(i/s)      i
1928
 *  1      0      i         s * floor(i/s)  i
1929
 *  0      1      i         floor(i/s)      i - s * floor(i/s)
1930
 *  1      1      i         s * floor(i/s)  i - s * floor(i/s)
1931
 */
1932
__isl_give isl_schedule_node *isl_schedule_node_band_tile(
1933
  __isl_take isl_schedule_node *node, __isl_take isl_multi_val *sizes)
1934
78
{
1935
78
  isl_schedule_tree *tree;
1936
78
  int anchored;
1937
78
1938
78
  if (!node || !sizes)
1939
0
    goto error;
1940
78
  anchored = isl_schedule_node_is_subtree_anchored(node);
1941
78
  if (anchored < 0)
1942
0
    goto error;
1943
78
  if (anchored)
1944
78
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
1945
78
      "cannot tile band node with anchored subtree",
1946
78
      goto error);
1947
78
1948
78
  if (check_space_multi_val(node, sizes) < 0)
1949
0
    goto error;
1950
78
1951
78
  tree = isl_schedule_node_get_tree(node);
1952
78
  tree = isl_schedule_tree_band_tile(tree, sizes);
1953
78
  return isl_schedule_node_graft_tree(node, tree);
1954
0
error:
1955
0
  isl_multi_val_free(sizes);
1956
0
  isl_schedule_node_free(node);
1957
0
  return NULL;
1958
78
}
1959
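For illustration (not part of the covered file), tile sizes are typically constructed in the band's own schedule space so that check_space_multi_val succeeds; the helper below, with a hypothetical name and a single fixed size, is one way to do that.

#include <isl/val.h>
#include <isl/schedule_node.h>

/* Hypothetical helper: tile the band node "node" with size "size"
 * in every band member.
 */
static __isl_give isl_schedule_node *tile_band(
	__isl_take isl_schedule_node *node, int size)
{
	int i, n;
	isl_ctx *ctx;
	isl_space *space;
	isl_multi_val *sizes;

	if (!node)
		return NULL;
	ctx = isl_schedule_node_get_ctx(node);
	n = isl_schedule_node_band_n_member(node);
	space = isl_schedule_node_band_get_space(node);
	sizes = isl_multi_val_zero(space);
	for (i = 0; i < n; ++i)
		sizes = isl_multi_val_set_val(sizes, i,
					isl_val_int_from_si(ctx, size));
	return isl_schedule_node_band_tile(node, sizes);
}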
1960
/* Move the band node "node" down to all the leaves in the subtree
1961
 * rooted at "node".
1962
 * Return a pointer to the node in the resulting tree that is in the same
1963
 * position as the node pointed to by "node" in the original tree.
1964
 *
1965
 * If the node only has a leaf child, then nothing needs to be done.
1966
 * Otherwise, the child of the node is removed and the result is
1967
 * appended to all the leaves in the subtree rooted at the original child.
1968
 * Since the node is moved to the leaves, it needs to be expanded
1969
 * according to the expansion, if any, defined by that subtree.
1970
 * In the end, the original node is replaced by the result of
1971
 * attaching copies of the expanded node to the leaves.
1972
 *
1973
 * If any of the nodes in the subtree rooted at "node" depend on
1974
 * the set of outer band nodes then we refuse to sink the band node.
1975
 */
1976
__isl_give isl_schedule_node *isl_schedule_node_band_sink(
1977
  __isl_take isl_schedule_node *node)
1978
16
{
1979
16
  enum isl_schedule_node_type type;
1980
16
  isl_schedule_tree *tree, *child;
1981
16
  isl_union_pw_multi_aff *contraction;
1982
16
  int anchored;
1983
16
1984
16
  if (!node)
1985
0
    return NULL;
1986
16
1987
16
  type = isl_schedule_node_get_type(node);
1988
16
  if (type != isl_schedule_node_band)
1989
16
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
1990
16
      "not a band node", return isl_schedule_node_free(node));
1991
16
  anchored = isl_schedule_node_is_subtree_anchored(node);
1992
16
  if (anchored < 0)
1993
0
    return isl_schedule_node_free(node);
1994
16
  if (anchored)
1995
16
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
1996
16
      "cannot sink band node in anchored subtree",
1997
16
      return isl_schedule_node_free(node));
1998
16
  if (isl_schedule_tree_n_children(node->tree) == 0)
1999
9
    return node;
2000
7
2001
7
  contraction = isl_schedule_node_get_subtree_contraction(node);
2002
7
2003
7
  tree = isl_schedule_node_get_tree(node);
2004
7
  child = isl_schedule_tree_get_child(tree, 0);
2005
7
  tree = isl_schedule_tree_reset_children(tree);
2006
7
  tree = isl_schedule_tree_pullback_union_pw_multi_aff(tree, contraction);
2007
7
  tree = isl_schedule_tree_append_to_leaves(child, tree);
2008
7
2009
7
  return isl_schedule_node_graft_tree(node, tree);
2010
7
}
2011
2012
/* Split "node" into two nested band nodes, one with the first "pos"
2013
 * dimensions and one with the remaining dimensions.
2014
 * The schedules of the two band nodes live in anonymous spaces.
2015
 * The loop AST generation type options and the isolate option
2016
 * are split over the two band nodes.
2017
 */
2018
__isl_give isl_schedule_node *isl_schedule_node_band_split(
2019
  __isl_take isl_schedule_node *node, int pos)
2020
34
{
2021
34
  int depth;
2022
34
  isl_schedule_tree *tree;
2023
34
2024
34
  depth = isl_schedule_node_get_schedule_depth(node);
2025
34
  tree = isl_schedule_node_get_tree(node);
2026
34
  tree = isl_schedule_tree_band_split(tree, pos, depth);
2027
34
  return isl_schedule_node_graft_tree(node, tree);
2028
34
}
2029
2030
/* Return the context of the context node "node".
2031
 */
2032
__isl_give isl_set *isl_schedule_node_context_get_context(
2033
  __isl_keep isl_schedule_node *node)
2034
0
{
2035
0
  if (!node)
2036
0
    return NULL;
2037
0
2038
0
  return isl_schedule_tree_context_get_context(node->tree);
2039
0
}
2040
2041
/* Return the domain of the domain node "node".
2042
 */
2043
__isl_give isl_union_set *isl_schedule_node_domain_get_domain(
2044
  __isl_keep isl_schedule_node *node)
2045
444
{
2046
444
  if (!node)
2047
0
    return NULL;
2048
444
2049
444
  return isl_schedule_tree_domain_get_domain(node->tree);
2050
444
}
2051
2052
/* Return the expansion map of expansion node "node".
2053
 */
2054
__isl_give isl_union_map *isl_schedule_node_expansion_get_expansion(
2055
  __isl_keep isl_schedule_node *node)
2056
2
{
2057
2
  if (!node)
2058
0
    return NULL;
2059
2
2060
2
  return isl_schedule_tree_expansion_get_expansion(node->tree);
2061
2
}
2062
2063
/* Return the contraction of expansion node "node".
2064
 */
2065
__isl_give isl_union_pw_multi_aff *isl_schedule_node_expansion_get_contraction(
2066
  __isl_keep isl_schedule_node *node)
2067
0
{
2068
0
  if (!node)
2069
0
    return NULL;
2070
0
2071
0
  return isl_schedule_tree_expansion_get_contraction(node->tree);
2072
0
}
2073
2074
/* Replace the contraction and the expansion of the expansion node "node"
2075
 * by "contraction" and "expansion".
2076
 */
2077
__isl_give isl_schedule_node *
2078
isl_schedule_node_expansion_set_contraction_and_expansion(
2079
  __isl_take isl_schedule_node *node,
2080
  __isl_take isl_union_pw_multi_aff *contraction,
2081
  __isl_take isl_union_map *expansion)
2082
0
{
2083
0
  isl_schedule_tree *tree;
2084
0
2085
0
  if (!node || !contraction || !expansion)
2086
0
    goto error;
2087
0
2088
0
  tree = isl_schedule_tree_copy(node->tree);
2089
0
  tree = isl_schedule_tree_expansion_set_contraction_and_expansion(tree,
2090
0
              contraction, expansion);
2091
0
  return isl_schedule_node_graft_tree(node, tree);
2092
0
error:
2093
0
  isl_schedule_node_free(node);
2094
0
  isl_union_pw_multi_aff_free(contraction);
2095
0
  isl_union_map_free(expansion);
2096
0
  return NULL;
2097
0
}
2098
2099
/* Return the extension of the extension node "node".
2100
 */
2101
__isl_give isl_union_map *isl_schedule_node_extension_get_extension(
2102
  __isl_keep isl_schedule_node *node)
2103
46
{
2104
46
  if (!node)
2105
0
    return NULL;
2106
46
2107
46
  return isl_schedule_tree_extension_get_extension(node->tree);
2108
46
}
2109
2110
/* Replace the extension of extension node "node" by "extension".
2111
 */
2112
__isl_give isl_schedule_node *isl_schedule_node_extension_set_extension(
2113
  __isl_take isl_schedule_node *node, __isl_take isl_union_map *extension)
2114
0
{
2115
0
  isl_schedule_tree *tree;
2116
0
2117
0
  if (!node || !extension)
2118
0
    goto error;
2119
0
2120
0
  tree = isl_schedule_tree_copy(node->tree);
2121
0
  tree = isl_schedule_tree_extension_set_extension(tree, extension);
2122
0
  return isl_schedule_node_graft_tree(node, tree);
2123
0
error:
2124
0
  isl_schedule_node_free(node);
2125
0
  isl_union_map_free(extension);
2126
0
  return NULL;
2127
0
}
2128
2129
/* Return the filter of the filter node "node".
2130
 */
2131
__isl_give isl_union_set *isl_schedule_node_filter_get_filter(
2132
  __isl_keep isl_schedule_node *node)
2133
2.97k
{
2134
2.97k
  if (!node)
2135
0
    return NULL;
2136
2.97k
2137
2.97k
  return isl_schedule_tree_filter_get_filter(node->tree);
2138
2.97k
}
2139
2140
/* Replace the filter of filter node "node" by "filter".
2141
 */
2142
__isl_give isl_schedule_node *isl_schedule_node_filter_set_filter(
2143
  __isl_take isl_schedule_node *node, __isl_take isl_union_set *filter)
2144
1.10k
{
2145
1.10k
  isl_schedule_tree *tree;
2146
1.10k
2147
1.10k
  if (!node || !filter)
2148
0
    goto error;
2149
1.10k
2150
1.10k
  tree = isl_schedule_tree_copy(node->tree);
2151
1.10k
  tree = isl_schedule_tree_filter_set_filter(tree, filter);
2152
1.10k
  return isl_schedule_node_graft_tree(node, tree);
2153
0
error:
2154
0
  isl_schedule_node_free(node);
2155
0
  isl_union_set_free(filter);
2156
0
  return NULL;
2157
1.10k
}
2158
2159
/* Intersect the filter of filter node "node" with "filter".
2160
 *
2161
 * If the filter of the node is already a subset of "filter",
2162
 * then leave the node unchanged.
2163
 */
2164
__isl_give isl_schedule_node *isl_schedule_node_filter_intersect_filter(
2165
  __isl_take isl_schedule_node *node, __isl_take isl_union_set *filter)
2166
0
{
2167
0
  isl_union_set *node_filter = NULL;
2168
0
  isl_bool subset;
2169
0
2170
0
  if (!node || !filter)
2171
0
    goto error;
2172
0
2173
0
  node_filter = isl_schedule_node_filter_get_filter(node);
2174
0
  subset = isl_union_set_is_subset(node_filter, filter);
2175
0
  if (subset < 0)
2176
0
    goto error;
2177
0
  if (subset) {
2178
0
    isl_union_set_free(node_filter);
2179
0
    isl_union_set_free(filter);
2180
0
    return node;
2181
0
  }
2182
0
  node_filter = isl_union_set_intersect(node_filter, filter);
2183
0
  node = isl_schedule_node_filter_set_filter(node, node_filter);
2184
0
  return node;
2185
0
error:
2186
0
  isl_schedule_node_free(node);
2187
0
  isl_union_set_free(node_filter);
2188
0
  isl_union_set_free(filter);
2189
0
  return NULL;
2190
0
}
2191
2192
/* Return the guard of the guard node "node".
2193
 */
2194
__isl_give isl_set *isl_schedule_node_guard_get_guard(
2195
  __isl_keep isl_schedule_node *node)
2196
0
{
2197
0
  if (!node)
2198
0
    return NULL;
2199
0
2200
0
  return isl_schedule_tree_guard_get_guard(node->tree);
2201
0
}
2202
2203
/* Return the mark identifier of the mark node "node".
2204
 */
2205
__isl_give isl_id *isl_schedule_node_mark_get_id(
2206
  __isl_keep isl_schedule_node *node)
2207
154
{
2208
154
  if (!node)
2209
0
    return NULL;
2210
154
2211
154
  return isl_schedule_tree_mark_get_id(node->tree);
2212
154
}
2213
2214
/* Replace the child at position "pos" of the sequence node "node"
2215
 * by the children of sequence root node of "tree".
2216
 */
2217
__isl_give isl_schedule_node *isl_schedule_node_sequence_splice(
2218
  __isl_take isl_schedule_node *node, int pos,
2219
  __isl_take isl_schedule_tree *tree)
2220
0
{
2221
0
  isl_schedule_tree *node_tree;
2222
0
2223
0
  if (!node || !tree)
2224
0
    goto error;
2225
0
  if (isl_schedule_node_get_type(node) != isl_schedule_node_sequence)
2226
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
2227
0
      "not a sequence node", goto error);
2228
0
  if (isl_schedule_tree_get_type(tree) != isl_schedule_node_sequence)
2229
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
2230
0
      "not a sequence node", goto error);
2231
0
  node_tree = isl_schedule_node_get_tree(node);
2232
0
  node_tree = isl_schedule_tree_sequence_splice(node_tree, pos, tree);
2233
0
  node = isl_schedule_node_graft_tree(node, node_tree);
2234
0
2235
0
  return node;
2236
0
error:
2237
0
  isl_schedule_node_free(node);
2238
0
  isl_schedule_tree_free(tree);
2239
0
  return NULL;
2240
0
}
2241
2242
/* Given a sequence node "node", with a child at position "pos" that
2243
 * is also a sequence node, attach the children of that node directly
2244
 * as children of "node" at that position, replacing the original child.
2245
 *
2246
 * The filters of these children are intersected with the filter
2247
 * of the child at position "pos".
2248
 */
2249
__isl_give isl_schedule_node *isl_schedule_node_sequence_splice_child(
2250
  __isl_take isl_schedule_node *node, int pos)
2251
0
{
2252
0
  int i, n;
2253
0
  isl_union_set *filter;
2254
0
  isl_schedule_node *child;
2255
0
  isl_schedule_tree *tree;
2256
0
2257
0
  if (!node)
2258
0
    return NULL;
2259
0
  if (isl_schedule_node_get_type(node) != isl_schedule_node_sequence)
2260
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
2261
0
      "not a sequence node",
2262
0
      return isl_schedule_node_free(node));
2263
0
  node = isl_schedule_node_child(node, pos);
2264
0
  node = isl_schedule_node_child(node, 0);
2265
0
  if (isl_schedule_node_get_type(node) != isl_schedule_node_sequence)
2266
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
2267
0
      "not a sequence node",
2268
0
      return isl_schedule_node_free(node));
2269
0
  child = isl_schedule_node_copy(node);
2270
0
  node = isl_schedule_node_parent(node);
2271
0
  filter = isl_schedule_node_filter_get_filter(node);
2272
0
  n = isl_schedule_node_n_children(child);
2273
0
  for (i = 0; i < n; ++i) {
2274
0
    child = isl_schedule_node_child(child, i);
2275
0
    child = isl_schedule_node_filter_intersect_filter(child,
2276
0
            isl_union_set_copy(filter));
2277
0
    child = isl_schedule_node_parent(child);
2278
0
  }
2279
0
  isl_union_set_free(filter);
2280
0
  tree = isl_schedule_node_get_tree(child);
2281
0
  isl_schedule_node_free(child);
2282
0
  node = isl_schedule_node_parent(node);
2283
0
  node = isl_schedule_node_sequence_splice(node, pos, tree);
2284
0
2285
0
  return node;
2286
0
}
2287
2288
/* Update the ancestors of "node" to point to the tree that "node"
2289
 * now points to.
2290
 * That is, replace the child in the original parent that corresponds
2291
 * to the current tree position by node->tree and continue updating
2292
 * the ancestors in the same way until the root is reached.
2293
 *
2294
 * If "fn" is not NULL, then it is called on each ancestor as we move up
2295
 * the tree so that it can modify the ancestor before it is added
2296
 * to the list of ancestors of the modified node.
2297
 * The additional "pos" argument records the position
2298
 * of the "tree" argument in the original schedule tree.
2299
 *
2300
 * If "node" originally points to a leaf of the schedule tree, then make sure
2301
 * that in the end it points to a leaf in the updated schedule tree.
2302
 */
2303
static __isl_give isl_schedule_node *update_ancestors(
2304
  __isl_take isl_schedule_node *node,
2305
  __isl_give isl_schedule_tree *(*fn)(__isl_take isl_schedule_tree *tree,
2306
    __isl_keep isl_schedule_node *pos, void *user), void *user)
2307
7.02k
{
2308
7.02k
  int i, n;
2309
7.02k
  int is_leaf;
2310
7.02k
  isl_schedule_tree *tree;
2311
7.02k
  isl_schedule_node *pos = NULL;
2312
7.02k
2313
7.02k
  if (fn)
2314
3
    pos = isl_schedule_node_copy(node);
2315
7.02k
2316
7.02k
  node = isl_schedule_node_cow(node);
2317
7.02k
  if (!node)
2318
0
    return isl_schedule_node_free(pos);
2319
7.02k
2320
7.02k
  n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
2321
7.02k
  tree = isl_schedule_tree_copy(node->tree);
2322
7.02k
2323
19.3k
  for (i = n - 1; i >= 0; 
--i12.3k
) {
2324
12.3k
    isl_schedule_tree *parent;
2325
12.3k
2326
12.3k
    parent = isl_schedule_tree_list_get_schedule_tree(
2327
12.3k
                node->ancestors, i);
2328
12.3k
    parent = isl_schedule_tree_replace_child(parent,
2329
12.3k
                node->child_pos[i], tree);
2330
12.3k
    if (fn) {
2331
9
      pos = isl_schedule_node_parent(pos);
2332
9
      parent = fn(parent, pos, user);
2333
9
    }
2334
12.3k
    node->ancestors = isl_schedule_tree_list_set_schedule_tree(
2335
12.3k
          node->ancestors, i, isl_schedule_tree_copy(parent));
2336
12.3k
2337
12.3k
    tree = parent;
2338
12.3k
  }
2339
7.02k
2340
7.02k
  if (fn)
2341
3
    isl_schedule_node_free(pos);
2342
7.02k
2343
7.02k
  is_leaf = isl_schedule_tree_is_leaf(node->tree);
2344
7.02k
  node->schedule = isl_schedule_set_root(node->schedule, tree);
2345
7.02k
  if (is_leaf) {
2346
201
    isl_schedule_tree_free(node->tree);
2347
201
    node->tree = isl_schedule_node_get_leaf(node);
2348
201
  }
2349
7.02k
2350
7.02k
  if (!node->schedule || !node->ancestors)
2351
0
    return isl_schedule_node_free(node);
2352
7.02k
2353
7.02k
  return node;
2354
7.02k
}
2355
2356
/* Replace the subtree that "pos" points to by "tree", updating
2357
 * the ancestors to maintain a consistent state.
2358
 */
2359
__isl_give isl_schedule_node *isl_schedule_node_graft_tree(
2360
  __isl_take isl_schedule_node *pos, __isl_take isl_schedule_tree *tree)
2361
7.84k
{
2362
7.84k
  if (!tree || !pos)
2363
0
    goto error;
2364
7.84k
  if (pos->tree == tree) {
2365
824
    isl_schedule_tree_free(tree);
2366
824
    return pos;
2367
824
  }
2368
7.02k
2369
7.02k
  pos = isl_schedule_node_cow(pos);
2370
7.02k
  if (!pos)
2371
0
    goto error;
2372
7.02k
2373
7.02k
  isl_schedule_tree_free(pos->tree);
2374
7.02k
  pos->tree = tree;
2375
7.02k
2376
7.02k
  return update_ancestors(pos, NULL, NULL);
2377
0
error:
2378
0
  isl_schedule_node_free(pos);
2379
0
  isl_schedule_tree_free(tree);
2380
0
  return NULL;
2381
7.02k
}
2382
2383
/* Make sure we can insert a node between "node" and its parent.
2384
 * Return -1 on error, reporting the reason why we cannot insert a node.
2385
 */
2386
static int check_insert(__isl_keep isl_schedule_node *node)
2387
2.26k
{
2388
2.26k
  int has_parent;
2389
2.26k
  enum isl_schedule_node_type type;
2390
2.26k
2391
2.26k
  has_parent = isl_schedule_node_has_parent(node);
2392
2.26k
  if (has_parent < 0)
2393
0
    return -1;
2394
2.26k
  if (!has_parent)
2395
2.26k
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
2396
2.26k
      "cannot insert node outside of root", return -1);
2397
2.26k
2398
2.26k
  type = isl_schedule_node_get_parent_type(node);
2399
2.26k
  if (type == isl_schedule_node_error)
2400
0
    return -1;
2401
2.26k
  if (type == isl_schedule_node_set || type == isl_schedule_node_sequence)
2402
2.26k
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
2403
2.26k
      "cannot insert node between set or sequence node "
2404
2.26k
      "and its filter children", return -1);
2405
2.26k
2406
2.26k
  return 0;
2407
2.26k
}
2408
2409
/* Insert a band node with partial schedule "mupa" between "node" and
2410
 * its parent.
2411
 * Return a pointer to the new band node.
2412
 *
2413
 * If any of the nodes in the subtree rooted at "node" depend on
2414
 * the set of outer band nodes then we refuse to insert the band node.
2415
 */
2416
__isl_give isl_schedule_node *isl_schedule_node_insert_partial_schedule(
2417
  __isl_take isl_schedule_node *node,
2418
  __isl_take isl_multi_union_pw_aff *mupa)
2419
1.98k
{
2420
1.98k
  int anchored;
2421
1.98k
  isl_schedule_band *band;
2422
1.98k
  isl_schedule_tree *tree;
2423
1.98k
2424
1.98k
  if (check_insert(node) < 0)
2425
0
    node = isl_schedule_node_free(node);
2426
1.98k
  anchored = isl_schedule_node_is_subtree_anchored(node);
2427
1.98k
  if (anchored < 0)
2428
0
    goto error;
2429
1.98k
  if (anchored)
2430
1.98k
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
2431
1.98k
      "cannot insert band node in anchored subtree",
2432
1.98k
      goto error);
2433
1.98k
2434
1.98k
  tree = isl_schedule_node_get_tree(node);
2435
1.98k
  band = isl_schedule_band_from_multi_union_pw_aff(mupa);
2436
1.98k
  tree = isl_schedule_tree_insert_band(tree, band);
2437
1.98k
  node = isl_schedule_node_graft_tree(node, tree);
2438
1.98k
2439
1.98k
  return node;
2440
0
error:
2441
0
  isl_schedule_node_free(node);
2442
0
  isl_multi_union_pw_aff_free(mupa);
2443
0
  return NULL;
2444
1.98k
}
2445
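As an illustrative sketch (not part of the covered file), the partial schedule is often read from a string in tests; the statement name "S" and the affine expression below are placeholders that would have to match the domain elements reaching "node".

#include <isl/aff.h>
#include <isl/schedule_node.h>

/* Hypothetical helper: insert a one-dimensional band above "node". */
static __isl_give isl_schedule_node *insert_outer_band(
	__isl_take isl_schedule_node *node)
{
	isl_ctx *ctx;
	isl_multi_union_pw_aff *mupa;

	if (!node)
		return NULL;
	ctx = isl_schedule_node_get_ctx(node);
	mupa = isl_multi_union_pw_aff_read_from_str(ctx,
		"[{ S[i, j] -> [(i)] }]");
	return isl_schedule_node_insert_partial_schedule(node, mupa);
}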
2446
/* Insert a context node with context "context" between "node" and its parent.
2447
 * Return a pointer to the new context node.
2448
 */
2449
__isl_give isl_schedule_node *isl_schedule_node_insert_context(
2450
  __isl_take isl_schedule_node *node, __isl_take isl_set *context)
2451
0
{
2452
0
  isl_schedule_tree *tree;
2453
0
2454
0
  if (check_insert(node) < 0)
2455
0
    node = isl_schedule_node_free(node);
2456
0
2457
0
  tree = isl_schedule_node_get_tree(node);
2458
0
  tree = isl_schedule_tree_insert_context(tree, context);
2459
0
  node = isl_schedule_node_graft_tree(node, tree);
2460
0
2461
0
  return node;
2462
0
}
2463
2464
/* Insert an expansion node with the given "contraction" and "expansion"
2465
 * between "node" and its parent.
2466
 * Return a pointer to the new expansion node.
2467
 *
2468
 * Typically the domain and range spaces of the expansion are different.
2469
 * This means that only one of them can refer to the current domain space
2470
 * in a consistent tree.  It is up to the caller to ensure that the tree
2471
 * returns to a consistent state.
2472
 */
2473
__isl_give isl_schedule_node *isl_schedule_node_insert_expansion(
2474
  __isl_take isl_schedule_node *node,
2475
  __isl_take isl_union_pw_multi_aff *contraction,
2476
  __isl_take isl_union_map *expansion)
2477
3
{
2478
3
  isl_schedule_tree *tree;
2479
3
2480
3
  if (check_insert(node) < 0)
2481
0
    node = isl_schedule_node_free(node);
2482
3
2483
3
  tree = isl_schedule_node_get_tree(node);
2484
3
  tree = isl_schedule_tree_insert_expansion(tree, contraction, expansion);
2485
3
  node = isl_schedule_node_graft_tree(node, tree);
2486
3
2487
3
  return node;
2488
3
}
2489
2490
/* Insert an extension node with extension "extension" between "node" and
2491
 * its parent.
2492
 * Return a pointer to the new extension node.
2493
 */
2494
__isl_give isl_schedule_node *isl_schedule_node_insert_extension(
2495
  __isl_take isl_schedule_node *node,
2496
  __isl_take isl_union_map *extension)
2497
24
{
2498
24
  isl_schedule_tree *tree;
2499
24
2500
24
  tree = isl_schedule_node_get_tree(node);
2501
24
  tree = isl_schedule_tree_insert_extension(tree, extension);
2502
24
  node = isl_schedule_node_graft_tree(node, tree);
2503
24
2504
24
  return node;
2505
24
}
2506
2507
/* Insert a filter node with filter "filter" between "node" and its parent.
2508
 * Return a pointer to the new filter node.
2509
 */
2510
__isl_give isl_schedule_node *isl_schedule_node_insert_filter(
2511
  __isl_take isl_schedule_node *node, __isl_take isl_union_set *filter)
2512
24
{
2513
24
  isl_schedule_tree *tree;
2514
24
2515
24
  if (check_insert(node) < 0)
2516
0
    node = isl_schedule_node_free(node);
2517
24
2518
24
  tree = isl_schedule_node_get_tree(node);
2519
24
  tree = isl_schedule_tree_insert_filter(tree, filter);
2520
24
  node = isl_schedule_node_graft_tree(node, tree);
2521
24
2522
24
  return node;
2523
24
}
2524
2525
/* Insert a guard node with guard "guard" between "node" and its parent.
2526
 * Return a pointer to the new guard node.
2527
 */
2528
__isl_give isl_schedule_node *isl_schedule_node_insert_guard(
2529
  __isl_take isl_schedule_node *node, __isl_take isl_set *guard)
2530
0
{
2531
0
  isl_schedule_tree *tree;
2532
0
2533
0
  if (check_insert(node) < 0)
2534
0
    node = isl_schedule_node_free(node);
2535
0
2536
0
  tree = isl_schedule_node_get_tree(node);
2537
0
  tree = isl_schedule_tree_insert_guard(tree, guard);
2538
0
  node = isl_schedule_node_graft_tree(node, tree);
2539
0
2540
0
  return node;
2541
0
}
2542
2543
/* Insert a mark node with mark identifier "mark" between "node" and
2544
 * its parent.
2545
 * Return a pointer to the new mark node.
2546
 */
2547
__isl_give isl_schedule_node *isl_schedule_node_insert_mark(
2548
  __isl_take isl_schedule_node *node, __isl_take isl_id *mark)
2549
158
{
2550
158
  isl_schedule_tree *tree;
2551
158
2552
158
  if (check_insert(node) < 0)
2553
0
    node = isl_schedule_node_free(node);
2554
158
2555
158
  tree = isl_schedule_node_get_tree(node);
2556
158
  tree = isl_schedule_tree_insert_mark(tree, mark);
2557
158
  node = isl_schedule_node_graft_tree(node, tree);
2558
158
2559
158
  return node;
2560
158
}
2561
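A small illustrative sketch (hypothetical helper and mark name, not part of the covered file):

#include <isl/id.h>
#include <isl/schedule_node.h>

/* Hypothetical helper: wrap the subtree at "node" in a mark named "kernel". */
static __isl_give isl_schedule_node *mark_as_kernel(
	__isl_take isl_schedule_node *node)
{
	isl_ctx *ctx;
	isl_id *id;

	if (!node)
		return NULL;
	ctx = isl_schedule_node_get_ctx(node);
	id = isl_id_alloc(ctx, "kernel", NULL);
	return isl_schedule_node_insert_mark(node, id);
}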
2562
/* Attach the current subtree of "node" to a sequence of filter tree nodes
2563
 * with filters described by "filters", attach this sequence
2564
 * of filter tree nodes as children to a new tree of type "type" and
2565
 * replace the original subtree of "node" by this new tree.
2566
 * Each copy of the original subtree is simplified with respect
2567
 * to the corresponding filter.
2568
 */
2569
static __isl_give isl_schedule_node *isl_schedule_node_insert_children(
2570
  __isl_take isl_schedule_node *node,
2571
  enum isl_schedule_node_type type,
2572
  __isl_take isl_union_set_list *filters)
2573
71
{
2574
71
  int i, n;
2575
71
  isl_ctx *ctx;
2576
71
  isl_schedule_tree *tree;
2577
71
  isl_schedule_tree_list *list;
2578
71
2579
71
  if (check_insert(node) < 0)
2580
0
    node = isl_schedule_node_free(node);
2581
71
2582
71
  if (!node || !filters)
2583
0
    goto error;
2584
71
2585
71
  ctx = isl_schedule_node_get_ctx(node);
2586
71
  n = isl_union_set_list_n_union_set(filters);
2587
71
  list = isl_schedule_tree_list_alloc(ctx, n);
2588
225
  for (i = 0; i < n; 
++i154
) {
2589
154
    isl_schedule_node *node_i;
2590
154
    isl_schedule_tree *tree;
2591
154
    isl_union_set *filter;
2592
154
2593
154
    filter = isl_union_set_list_get_union_set(filters, i);
2594
154
    node_i = isl_schedule_node_copy(node);
2595
154
    node_i = isl_schedule_node_gist(node_i,
2596
154
            isl_union_set_copy(filter));
2597
154
    tree = isl_schedule_node_get_tree(node_i);
2598
154
    isl_schedule_node_free(node_i);
2599
154
    tree = isl_schedule_tree_insert_filter(tree, filter);
2600
154
    list = isl_schedule_tree_list_add(list, tree);
2601
154
  }
2602
71
  tree = isl_schedule_tree_from_children(type, list);
2603
71
  node = isl_schedule_node_graft_tree(node, tree);
2604
71
2605
71
  isl_union_set_list_free(filters);
2606
71
  return node;
2607
0
error:
2608
0
  isl_union_set_list_free(filters);
2609
0
  isl_schedule_node_free(node);
2610
0
  return NULL;
2611
71
}
2612
2613
/* Insert a sequence node with child filters "filters" between "node" and
2614
 * its parent.  That is, the tree that "node" points to is attached
2615
 * to each of the child nodes of the filter nodes.
2616
 * Return a pointer to the new sequence node.
2617
 */
2618
__isl_give isl_schedule_node *isl_schedule_node_insert_sequence(
2619
  __isl_take isl_schedule_node *node,
2620
  __isl_take isl_union_set_list *filters)
2621
58
{
2622
58
  return isl_schedule_node_insert_children(node,
2623
58
          isl_schedule_node_sequence, filters);
2624
58
}
2625
2626
/* Insert a set node with child filters "filters" between "node" and
2627
 * its parent.  That is, the tree that "node" points to is attached
2628
 * to each of the child nodes of the filter nodes.
2629
 * Return a pointer to the new set node.
2630
 */
2631
__isl_give isl_schedule_node *isl_schedule_node_insert_set(
2632
  __isl_take isl_schedule_node *node,
2633
  __isl_take isl_union_set_list *filters)
2634
13
{
2635
13
  return isl_schedule_node_insert_children(node,
2636
13
          isl_schedule_node_set, filters);
2637
13
}
2638
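For illustration (hypothetical helper and placeholder sets, not part of the covered file), a sequence with two filters could be inserted as follows; in practice the filters would be derived from the domain elements that actually reach "node".

#include <isl/union_set.h>
#include <isl/schedule_node.h>

/* Hypothetical helper: order the instances of S with i < 100 before
 * the remaining ones by inserting a two-filter sequence above "node".
 */
static __isl_give isl_schedule_node *order_in_two_groups(
	__isl_take isl_schedule_node *node)
{
	isl_ctx *ctx;
	isl_union_set_list *filters;

	if (!node)
		return NULL;
	ctx = isl_schedule_node_get_ctx(node);
	filters = isl_union_set_list_from_union_set(
		isl_union_set_read_from_str(ctx, "{ S[i] : i < 100 }"));
	filters = isl_union_set_list_add(filters,
		isl_union_set_read_from_str(ctx, "{ S[i] : i >= 100 }"));
	return isl_schedule_node_insert_sequence(node, filters);
}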
2639
/* Remove "node" from its schedule tree and return a pointer
2640
 * to the leaf at the same position in the updated schedule tree.
2641
 *
2642
 * It is not allowed to remove the root of a schedule tree or
2643
 * a child of a set or sequence node.
2644
 */
2645
__isl_give isl_schedule_node *isl_schedule_node_cut(
2646
  __isl_take isl_schedule_node *node)
2647
441
{
2648
441
  isl_schedule_tree *leaf;
2649
441
  enum isl_schedule_node_type parent_type;
2650
441
2651
441
  if (!node)
2652
0
    return NULL;
2653
441
  if (!isl_schedule_node_has_parent(node))
2654
441
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
2655
441
      "cannot cut root", return isl_schedule_node_free(node));
2656
441
2657
441
  parent_type = isl_schedule_node_get_parent_type(node);
2658
441
  if (parent_type == isl_schedule_node_set ||
2659
441
      parent_type == isl_schedule_node_sequence)
2660
441
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
2661
441
      "cannot cut child of set or sequence",
2662
441
      return isl_schedule_node_free(node));
2663
441
2664
441
  leaf = isl_schedule_node_get_leaf(node);
2665
441
  return isl_schedule_node_graft_tree(node, leaf);
2666
441
}
2667
2668
/* Remove a single node from the schedule tree, attaching the child
2669
 * of "node" directly to its parent.
2670
 * Return a pointer to this former child or to the leaf the position
2671
 * of the original node if there was no child.
2672
 * It is not allowed to remove the root of a schedule tree,
2673
 * a set or sequence node, a child of a set or sequence node or
2674
 * a band node with an anchored subtree.
2675
 */
2676
__isl_give isl_schedule_node *isl_schedule_node_delete(
2677
  __isl_take isl_schedule_node *node)
2678
436
{
2679
436
  int n;
2680
436
  isl_schedule_tree *tree;
2681
436
  enum isl_schedule_node_type type;
2682
436
2683
436
  if (!node)
2684
0
    return NULL;
2685
436
2686
436
  if (isl_schedule_node_get_tree_depth(node) == 0)
2687
436
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
2688
436
      "cannot delete root node",
2689
436
      return isl_schedule_node_free(node));
2690
436
  n = isl_schedule_node_n_children(node);
2691
436
  if (n != 1)
2692
436
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
2693
436
      "can only delete node with a single child",
2694
436
      return isl_schedule_node_free(node));
2695
436
  type = isl_schedule_node_get_parent_type(node);
2696
436
  if (type == isl_schedule_node_sequence || type == isl_schedule_node_set)
2697
436
    
isl_die0
(isl_schedule_node_get_ctx(node), isl_error_invalid,
2698
436
      "cannot delete child of set or sequence",
2699
436
      return isl_schedule_node_free(node));
2700
436
  if (isl_schedule_node_get_type(node) == isl_schedule_node_band) {
2701
94
    int anchored;
2702
94
2703
94
    anchored = isl_schedule_node_is_subtree_anchored(node);
2704
94
    if (anchored < 0)
2705
0
      return isl_schedule_node_free(node);
2706
94
    if (anchored)
2707
94
      
isl_die0
(isl_schedule_node_get_ctx(node),
2708
94
        isl_error_invalid,
2709
94
        "cannot delete band node with anchored subtree",
2710
94
        return isl_schedule_node_free(node));
2711
94
  }
2712
436
2713
436
  tree = isl_schedule_node_get_tree(node);
2714
436
  if (!tree || isl_schedule_tree_has_children(tree)) {
2715
275
    tree = isl_schedule_tree_child(tree, 0);
2716
275
  } else {
2717
161
    isl_schedule_tree_free(tree);
2718
161
    tree = isl_schedule_node_get_leaf(node);
2719
161
  }
2720
436
  node = isl_schedule_node_graft_tree(node, tree);
2721
436
2722
436
  return node;
2723
436
}
2724
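Illustrative sketch (hypothetical helper, not part of the covered file): a mark node inserted earlier can be removed again with this function, leaving its single child attached to the parent.

/* Hypothetical helper: delete "node" if it is a mark node,
 * returning its former child (or the leaf at that position).
 */
static __isl_give isl_schedule_node *strip_mark(
	__isl_take isl_schedule_node *node)
{
	if (!node)
		return NULL;
	if (isl_schedule_node_get_type(node) != isl_schedule_node_mark)
		return node;
	return isl_schedule_node_delete(node);
}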
2725
/* Internal data structure for the group_ancestor callback.
2726
 *
2727
 * If "finished" is set, then we no longer need to modify
2728
 * any further ancestors.
2729
 *
2730
 * "contraction" and "expansion" represent the expansion
2731
 * that reflects the grouping.
2732
 *
2733
 * "domain" contains the domain elements that reach the position
2734
 * where the grouping is performed.  That is, it is the range
2735
 * of the resulting expansion.
2736
 * "domain_universe" is the universe of "domain".
2737
 * "group" is the set of group elements, i.e., the domain
2738
 * of the resulting expansion.
2739
 * "group_universe" is the universe of "group".
2740
 *
2741
 * "sched" is the schedule for the group elements, in pratice
2742
 * an identity mapping on "group_universe".
2743
 * "dim" is the dimension of "sched".
2744
 */
2745
struct isl_schedule_group_data {
2746
  int finished;
2747
2748
  isl_union_map *expansion;
2749
  isl_union_pw_multi_aff *contraction;
2750
2751
  isl_union_set *domain;
2752
  isl_union_set *domain_universe;
2753
  isl_union_set *group;
2754
  isl_union_set *group_universe;
2755
2756
  int dim;
2757
  isl_multi_aff *sched;
2758
};
2759
2760
/* Is domain covered by data->domain within data->domain_universe?
2761
 */
2762
static int locally_covered_by_domain(__isl_keep isl_union_set *domain,
2763
  struct isl_schedule_group_data *data)
2764
3
{
2765
3
  int is_subset;
2766
3
  isl_union_set *test;
2767
3
2768
3
  test = isl_union_set_copy(domain);
2769
3
  test = isl_union_set_intersect(test,
2770
3
          isl_union_set_copy(data->domain_universe));
2771
3
  is_subset = isl_union_set_is_subset(test, data->domain);
2772
3
  isl_union_set_free(test);
2773
3
2774
3
  return is_subset;
2775
3
}
2776
2777
/* Update the band tree root "tree" to refer to the group instances
2778
 * in data->group rather than the original domain elements in data->domain.
2779
 * "pos" is the position in the original schedule tree where the modified
2780
 * "tree" will be attached.
2781
 *
2782
 * Add the part of the identity schedule on the group instances data->sched
2783
 * that corresponds to this band node to the band schedule.
2784
 * If the domain elements that reach the node and that are part
2785
 * of data->domain_universe are all elements of data->domain (and therefore
2786
 * replaced by the group instances) then this data->domain_universe
2787
 * is removed from the domain of the band schedule.
2788
 */
2789
static __isl_give isl_schedule_tree *group_band(
2790
  __isl_take isl_schedule_tree *tree, __isl_keep isl_schedule_node *pos,
2791
  struct isl_schedule_group_data *data)
2792
2
{
2793
2
  isl_union_set *domain;
2794
2
  isl_multi_aff *ma;
2795
2
  isl_multi_union_pw_aff *mupa, *partial;
2796
2
  int is_covered;
2797
2
  int depth, n, has_id;
2798
2
2799
2
  domain = isl_schedule_node_get_domain(pos);
2800
2
  is_covered = locally_covered_by_domain(domain, data);
2801
2
  if (is_covered >= 0 && is_covered) {
2802
2
    domain = isl_union_set_universe(domain);
2803
2
    domain = isl_union_set_subtract(domain,
2804
2
          isl_union_set_copy(data->domain_universe));
2805
2
    tree = isl_schedule_tree_band_intersect_domain(tree, domain);
2806
2
  } else
2807
0
    isl_union_set_free(domain);
2808
2
  if (is_covered < 0)
2809
0
    return isl_schedule_tree_free(tree);
2810
2
  depth = isl_schedule_node_get_schedule_depth(pos);
2811
2
  n = isl_schedule_tree_band_n_member(tree);
2812
2
  ma = isl_multi_aff_copy(data->sched);
2813
2
  ma = isl_multi_aff_drop_dims(ma, isl_dim_out, 0, depth);
2814
2
  ma = isl_multi_aff_drop_dims(ma, isl_dim_out, n, data->dim - depth - n);
2815
2
  mupa = isl_multi_union_pw_aff_from_multi_aff(ma);
2816
2
  partial = isl_schedule_tree_band_get_partial_schedule(tree);
2817
2
  has_id = isl_multi_union_pw_aff_has_tuple_id(partial, isl_dim_set);
2818
2
  if (has_id < 0) {
2819
0
    partial = isl_multi_union_pw_aff_free(partial);
2820
2
  } else if (has_id) {
2821
0
    isl_id *id;
2822
0
    id = isl_multi_union_pw_aff_get_tuple_id(partial, isl_dim_set);
2823
0
    mupa = isl_multi_union_pw_aff_set_tuple_id(mupa,
2824
0
                  isl_dim_set, id);
2825
0
  }
2826
2
  partial = isl_multi_union_pw_aff_union_add(partial, mupa);
2827
2
  tree = isl_schedule_tree_band_set_partial_schedule(tree, partial);
2828
2
2829
2
  return tree;
2830
2
}
2831
2832
/* Drop the parameters in "uset" that are not also in "space".
2833
 * "n" is the number of parameters in "space".
2834
 */
2835
static __isl_give isl_union_set *union_set_drop_extra_params(
2836
  __isl_take isl_union_set *uset, __isl_keep isl_space *space, int n)
2837
0
{
2838
0
  int n2;
2839
0
2840
0
  uset = isl_union_set_align_params(uset, isl_space_copy(space));
2841
0
  n2 = isl_union_set_dim(uset, isl_dim_param);
2842
0
  uset = isl_union_set_project_out(uset, isl_dim_param, n, n2 - n);
2843
0
2844
0
  return uset;
2845
0
}
2846
2847
/* Update the context tree root "tree" to refer to the group instances
2848
 * in data->group rather than the original domain elements in data->domain.
2849
 * "pos" is the position in the original schedule tree where the modified
2850
 * "tree" will be attached.
2851
 *
2852
 * We do not actually need to update "tree" since a context node only
2853
 * refers to the schedule space.  However, we may need to update "data"
2854
 * to not refer to any parameters introduced by the context node.
2855
 */
2856
static __isl_give isl_schedule_tree *group_context(
2857
  __isl_take isl_schedule_tree *tree, __isl_keep isl_schedule_node *pos,
2858
  struct isl_schedule_group_data *data)
2859
0
{
2860
0
  isl_space *space;
2861
0
  isl_union_set *domain;
2862
0
  int n1, n2;
2863
0
  int involves;
2864
0
2865
0
  if (isl_schedule_node_get_tree_depth(pos) == 1)
2866
0
    return tree;
2867
0
2868
0
  domain = isl_schedule_node_get_universe_domain(pos);
2869
0
  space = isl_union_set_get_space(domain);
2870
0
  isl_union_set_free(domain);
2871
0
2872
0
  n1 = isl_space_dim(space, isl_dim_param);
2873
0
  data->expansion = isl_union_map_align_params(data->expansion, space);
2874
0
  n2 = isl_union_map_dim(data->expansion, isl_dim_param);
2875
0
2876
0
  if (!data->expansion)
2877
0
    return isl_schedule_tree_free(tree);
2878
0
  if (n1 == n2)
2879
0
    return tree;
2880
0
2881
0
  involves = isl_union_map_involves_dims(data->expansion,
2882
0
        isl_dim_param, n1, n2 - n1);
2883
0
  if (involves < 0)
2884
0
    return isl_schedule_tree_free(tree);
2885
0
  if (involves)
2886
0
    isl_die(isl_schedule_node_get_ctx(pos), isl_error_invalid,
2887
0
      "grouping cannot only refer to global parameters",
2888
0
      return isl_schedule_tree_free(tree));
2889
0
2890
0
  data->expansion = isl_union_map_project_out(data->expansion,
2891
0
        isl_dim_param, n1, n2 - n1);
2892
0
  space = isl_union_map_get_space(data->expansion);
2893
0
2894
0
  data->contraction = isl_union_pw_multi_aff_align_params(
2895
0
        data->contraction, isl_space_copy(space));
2896
0
  n2 = isl_union_pw_multi_aff_dim(data->contraction, isl_dim_param);
2897
0
  data->contraction = isl_union_pw_multi_aff_drop_dims(data->contraction,
2898
0
        isl_dim_param, n1, n2 - n1);
2899
0
2900
0
  data->domain = union_set_drop_extra_params(data->domain, space, n1);
2901
0
  data->domain_universe =
2902
0
    union_set_drop_extra_params(data->domain_universe, space, n1);
2903
0
  data->group = union_set_drop_extra_params(data->group, space, n1);
2904
0
  data->group_universe =
2905
0
    union_set_drop_extra_params(data->group_universe, space, n1);
2906
0
2907
0
  data->sched = isl_multi_aff_align_params(data->sched,
2908
0
        isl_space_copy(space));
2909
0
  n2 = isl_multi_aff_dim(data->sched, isl_dim_param);
2910
0
  data->sched = isl_multi_aff_drop_dims(data->sched,
2911
0
        isl_dim_param, n1, n2 - n1);
2912
0
2913
0
  isl_space_free(space);
2914
0
2915
0
  return tree;
2916
0
}
2917
2918
/* Update the domain tree root "tree" to refer to the group instances
2919
 * in data->group rather than the original domain elements in data->domain.
2920
 * "pos" is the position in the original schedule tree where the modified
2921
 * "tree" will be attached.
2922
 *
2923
 * We first double-check that all grouped domain elements are actually
2924
 * part of the root domain and then replace those elements by the group
2925
 * instances.
2926
 */
2927
static __isl_give isl_schedule_tree *group_domain(
2928
  __isl_take isl_schedule_tree *tree, __isl_keep isl_schedule_node *pos,
2929
  struct isl_schedule_group_data *data)
2930
2
{
2931
2
  isl_union_set *domain;
2932
2
  int is_subset;
2933
2
2934
2
  domain = isl_schedule_tree_domain_get_domain(tree);
2935
2
  is_subset = isl_union_set_is_subset(data->domain, domain);
2936
2
  isl_union_set_free(domain);
2937
2
  if (is_subset < 0)
2938
0
    return isl_schedule_tree_free(tree);
2939
2
  if (!is_subset)
2940
2
    
isl_die0
(isl_schedule_tree_get_ctx(tree), isl_error_internal,
2941
2
      "grouped domain should be part of outer domain",
2942
2
      return isl_schedule_tree_free(tree));
2943
2
  domain = isl_schedule_tree_domain_get_domain(tree);
2944
2
  domain = isl_union_set_subtract(domain,
2945
2
        isl_union_set_copy(data->domain));
2946
2
  domain = isl_union_set_union(domain, isl_union_set_copy(data->group));
2947
2
  tree = isl_schedule_tree_domain_set_domain(tree, domain);
2948
2
2949
2
  return tree;
2950
2
}
2951
2952
/* Update the expansion tree root "tree" to refer to the group instances
2953
 * in data->group rather than the original domain elements in data->domain.
2954
 * "pos" is the position in the original schedule tree where the modified
2955
 * "tree" will be attached.
2956
 *
2957
 * Let G_1 -> D_1 be the expansion of "tree" and G_2 -> D_2 the newly
2958
 * introduced expansion in a descendant of "tree".
2959
 * We first double-check that D_2 is a subset of D_1.
2960
 * Then we remove D_2 from the range of G_1 -> D_1 and add the mapping
2961
 * G_1 -> D_1 . D_2 -> G_2.
2962
 * Similarly, we restrict the domain of the contraction to the universe
2963
 * of the range of the updated expansion and add G_2 -> D_2 . D_1 -> G_1,
2964
 * attempting to remove the domain constraints of this additional part.
2965
 */
2966
static __isl_give isl_schedule_tree *group_expansion(
2967
  __isl_take isl_schedule_tree *tree, __isl_keep isl_schedule_node *pos,
2968
  struct isl_schedule_group_data *data)
2969
1
{
2970
1
  isl_union_set *domain;
2971
1
  isl_union_map *expansion, *umap;
2972
1
  isl_union_pw_multi_aff *contraction, *upma;
2973
1
  int is_subset;
2974
1
2975
1
  expansion = isl_schedule_tree_expansion_get_expansion(tree);
2976
1
  domain = isl_union_map_range(expansion);
2977
1
  is_subset = isl_union_set_is_subset(data->domain, domain);
2978
1
  isl_union_set_free(domain);
2979
1
  if (is_subset < 0)
2980
0
    return isl_schedule_tree_free(tree);
2981
1
  if (!is_subset)
2982
1
    
isl_die0
(isl_schedule_tree_get_ctx(tree), isl_error_internal,
2983
1
      "grouped domain should be part "
2984
1
      "of outer expansion domain",
2985
1
      return isl_schedule_tree_free(tree));
2986
1
  expansion = isl_schedule_tree_expansion_get_expansion(tree);
2987
1
  umap = isl_union_map_from_union_pw_multi_aff(
2988
1
      isl_union_pw_multi_aff_copy(data->contraction));
2989
1
  umap = isl_union_map_apply_range(expansion, umap);
2990
1
  expansion = isl_schedule_tree_expansion_get_expansion(tree);
2991
1
  expansion = isl_union_map_subtract_range(expansion,
2992
1
        isl_union_set_copy(data->domain));
2993
1
  expansion = isl_union_map_union(expansion, umap);
2994
1
  umap = isl_union_map_universe(isl_union_map_copy(expansion));
2995
1
  domain = isl_union_map_range(umap);
2996
1
  contraction = isl_schedule_tree_expansion_get_contraction(tree);
2997
1
  umap = isl_union_map_from_union_pw_multi_aff(contraction);
2998
1
  umap = isl_union_map_apply_range(isl_union_map_copy(data->expansion),
2999
1
          umap);
3000
1
  upma = isl_union_pw_multi_aff_from_union_map(umap);
3001
1
  contraction = isl_schedule_tree_expansion_get_contraction(tree);
3002
1
  contraction = isl_union_pw_multi_aff_intersect_domain(contraction,
3003
1
                domain);
3004
1
  domain = isl_union_pw_multi_aff_domain(
3005
1
        isl_union_pw_multi_aff_copy(upma));
3006
1
  upma = isl_union_pw_multi_aff_gist(upma, domain);
3007
1
  contraction = isl_union_pw_multi_aff_union_add(contraction, upma);
3008
1
  tree = isl_schedule_tree_expansion_set_contraction_and_expansion(tree,
3009
1
              contraction, expansion);
3010
1
3011
1
  return tree;
3012
1
}
3013
3014
/* Update the tree root "tree" to refer to the group instances
3015
 * in data->group rather than the original domain elements in data->domain.
3016
 * "pos" is the position in the original schedule tree where the modified
3017
 * "tree" will be attached.
3018
 *
3019
 * If we have come across a domain or expansion node before (data->finished
3020
 * is set), then we no longer need to perform any modifications.
3021
 *
3022
 * If "tree" is a filter, then we add data->group_universe to the filter.
3023
 * We also remove data->domain_universe from the filter if all the domain
3024
 * elements in this universe that reach the filter node are part of
3025
 * the elements that are being grouped by data->expansion.
3026
 * If "tree" is a band, domain or expansion, then it is handled
3027
 * in a separate function.
3028
 */
3029
static __isl_give isl_schedule_tree *group_ancestor(
3030
  __isl_take isl_schedule_tree *tree, __isl_keep isl_schedule_node *pos,
3031
  void *user)
3032
9
{
3033
9
  struct isl_schedule_group_data *data = user;
3034
9
  isl_union_set *domain;
3035
9
  int is_covered;
3036
9
3037
9
  if (!tree || !pos)
3038
0
    return isl_schedule_tree_free(tree);
3039
9
3040
9
  if (data->finished)
3041
2
    return tree;
3042
7
3043
7
  switch (isl_schedule_tree_get_type(tree)) {
3044
7
  case isl_schedule_node_error:
3045
0
    return isl_schedule_tree_free(tree);
3046
7
  case isl_schedule_node_extension:
3047
0
    isl_die(isl_schedule_tree_get_ctx(tree), isl_error_unsupported,
3048
0
      "grouping not allowed in extended tree",
3049
0
      return isl_schedule_tree_free(tree));
3050
2
  case isl_schedule_node_band:
3051
2
    tree = group_band(tree, pos, data);
3052
2
    break;
3053
0
  case isl_schedule_node_context:
3054
0
    tree = group_context(tree, pos, data);
3055
0
    break;
3056
2
  case isl_schedule_node_domain:
3057
2
    tree = group_domain(tree, pos, data);
3058
2
    data->finished = 1;
3059
2
    break;
3060
1
  case isl_schedule_node_filter:
3061
1
    domain = isl_schedule_node_get_domain(pos);
3062
1
    is_covered = locally_covered_by_domain(domain, data);
3063
1
    isl_union_set_free(domain);
3064
1
    if (is_covered < 0)
3065
0
      return isl_schedule_tree_free(tree);
3066
1
    domain = isl_schedule_tree_filter_get_filter(tree);
3067
1
    if (is_covered)
3068
1
      domain = isl_union_set_subtract(domain,
3069
1
            isl_union_set_copy(data->domain_universe));
3070
1
    domain = isl_union_set_union(domain,
3071
1
            isl_union_set_copy(data->group_universe));
3072
1
    tree = isl_schedule_tree_filter_set_filter(tree, domain);
3073
1
    break;
3074
1
  case isl_schedule_node_expansion:
3075
1
    tree = group_expansion(tree, pos, data);
3076
1
    data->finished = 1;
3077
1
    break;
3078
1
  case isl_schedule_node_leaf:
3079
1
  case isl_schedule_node_guard:
3080
1
  case isl_schedule_node_mark:
3081
1
  case isl_schedule_node_sequence:
3082
1
  case isl_schedule_node_set:
3083
1
    break;
3084
7
  }
3085
7
3086
7
  return tree;
3087
7
}
3088
3089
/* Group the domain elements that reach "node" into instances
3090
 * of a single statement with identifier "group_id".
3091
 * In particular, group the domain elements according to their
3092
 * prefix schedule.
3093
 *
3094
 * That is, introduce an expansion node with as contraction
3095
 * the prefix schedule (with the target space replaced by "group_id")
3096
 * and as expansion the inverse of this contraction (with its range
3097
 * intersected with the domain elements that reach "node").
3098
 * The outer nodes are then modified to refer to the group instances
3099
 * instead of the original domain elements.
3100
 *
3101
 * No instance of "group_id" is allowed to reach "node" prior
3102
 * to the grouping.
3103
 * No ancestor of "node" is allowed to be an extension node.
3104
 *
3105
 * Return a pointer to the original node in the tree, i.e., the child
3106
 * of the newly introduced expansion node.
3107
 */
3108
__isl_give isl_schedule_node *isl_schedule_node_group(
3109
  __isl_take isl_schedule_node *node, __isl_take isl_id *group_id)
3110
3
{
3111
3
  struct isl_schedule_group_data data = { 0 };
3112
3
  isl_space *space;
3113
3
  isl_union_set *domain;
3114
3
  isl_union_pw_multi_aff *contraction;
3115
3
  isl_union_map *expansion;
3116
3
  int disjoint;
3117
3
3118
3
  if (!node || !group_id)
3119
0
    goto error;
3120
3
  if (check_insert(node) < 0)
3121
0
    goto error;
3122
3
3123
3
  domain = isl_schedule_node_get_domain(node);
3124
3
  data.domain = isl_union_set_copy(domain);
3125
3
  data.domain_universe = isl_union_set_copy(domain);
3126
3
  data.domain_universe = isl_union_set_universe(data.domain_universe);
3127
3
3128
3
  data.dim = isl_schedule_node_get_schedule_depth(node);
3129
3
  if (data.dim == 0) {
3130
0
    isl_ctx *ctx;
3131
0
    isl_set *set;
3132
0
    isl_union_set *group;
3133
0
    isl_union_map *univ;
3134
0
3135
0
    ctx = isl_schedule_node_get_ctx(node);
3136
0
    space = isl_space_set_alloc(ctx, 0, 0);
3137
0
    space = isl_space_set_tuple_id(space, isl_dim_set, group_id);
3138
0
    set = isl_set_universe(isl_space_copy(space));
3139
0
    group = isl_union_set_from_set(set);
3140
0
    expansion = isl_union_map_from_domain_and_range(domain, group);
3141
0
    univ = isl_union_map_universe(isl_union_map_copy(expansion));
3142
0
    contraction = isl_union_pw_multi_aff_from_union_map(univ);
3143
0
    expansion = isl_union_map_reverse(expansion);
3144
3
  } else {
3145
3
    isl_multi_union_pw_aff *prefix;
3146
3
    isl_union_set *univ;
3147
3
3148
3
    prefix =
3149
3
    isl_schedule_node_get_prefix_schedule_multi_union_pw_aff(node);
3150
3
    prefix = isl_multi_union_pw_aff_set_tuple_id(prefix,
3151
3
              isl_dim_set, group_id);
3152
3
    space = isl_multi_union_pw_aff_get_space(prefix);
3153
3
    contraction = isl_union_pw_multi_aff_from_multi_union_pw_aff(
3154
3
              prefix);
3155
3
    univ = isl_union_set_universe(isl_union_set_copy(domain));
3156
3
    contraction =
3157
3
        isl_union_pw_multi_aff_intersect_domain(contraction, univ);
3158
3
    expansion = isl_union_map_from_union_pw_multi_aff(
3159
3
            isl_union_pw_multi_aff_copy(contraction));
3160
3
    expansion = isl_union_map_reverse(expansion);
3161
3
    expansion = isl_union_map_intersect_range(expansion, domain);
3162
3
  }
3163
3
  space = isl_space_map_from_set(space);
3164
3
  data.sched = isl_multi_aff_identity(space);
3165
3
  data.group = isl_union_map_domain(isl_union_map_copy(expansion));
3166
3
  data.group = isl_union_set_coalesce(data.group);
3167
3
  data.group_universe = isl_union_set_copy(data.group);
3168
3
  data.group_universe = isl_union_set_universe(data.group_universe);
3169
3
  data.expansion = isl_union_map_copy(expansion);
3170
3
  data.contraction = isl_union_pw_multi_aff_copy(contraction);
3171
3
  node = isl_schedule_node_insert_expansion(node, contraction, expansion);
3172
3
3173
3
  disjoint = isl_union_set_is_disjoint(data.domain_universe,
3174
3
              data.group_universe);
3175
3
3176
3
  node = update_ancestors(node, &group_ancestor, &data);
3177
3
3178
3
  isl_union_set_free(data.domain);
3179
3
  isl_union_set_free(data.domain_universe);
3180
3
  isl_union_set_free(data.group);
3181
3
  isl_union_set_free(data.group_universe);
3182
3
  isl_multi_aff_free(data.sched);
3183
3
  isl_union_map_free(data.expansion);
3184
3
  isl_union_pw_multi_aff_free(data.contraction);
3185
3
3186
3
  node = isl_schedule_node_child(node, 0);
3187
3
3188
3
  if (!node || disjoint < 0)
3189
0
    return isl_schedule_node_free(node);
3190
3
  if (!disjoint)
3191
3
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
3192
3
      "group instances already reach node",
3193
3
      return isl_schedule_node_free(node));
3194
3
3195
3
  return node;
3196
0
error:
3197
0
  isl_schedule_node_free(node);
3198
0
  isl_id_free(group_id);
3199
0
  return NULL;
3200
3
}
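
As a rough illustration of how isl_schedule_node_group might be called, the sketch below builds a one-dimensional band over two statements and groups the instances below it by their prefix schedule. The domain, the band expression and the group name "G" are made-up examples, not taken from the measured source.

#include <isl/ctx.h>
#include <isl/id.h>
#include <isl/union_set.h>
#include <isl/aff.h>
#include <isl/schedule.h>
#include <isl/schedule_node.h>

int main(void)
{
  isl_ctx *ctx = isl_ctx_alloc();
  isl_union_set *domain;
  isl_multi_union_pw_aff *band;
  isl_schedule_node *node;
  isl_schedule *schedule;

  /* Two statements sharing the same iteration space (made-up example). */
  domain = isl_union_set_read_from_str(ctx,
    "{ S[i] : 0 <= i < 16; T[i] : 0 <= i < 16 }");
  node = isl_schedule_node_from_domain(domain);
  node = isl_schedule_node_child(node, 0);

  /* Insert a one-dimensional band scheduling both statements by i. */
  band = isl_multi_union_pw_aff_read_from_str(ctx,
    "[{ S[i] -> [(i)]; T[i] -> [(i)] }]");
  node = isl_schedule_node_insert_partial_schedule(node, band);
  node = isl_schedule_node_child(node, 0);

  /* Group the instances below the band according to the prefix schedule,
   * introducing an expansion node with group statement "G".
   */
  node = isl_schedule_node_group(node, isl_id_alloc(ctx, "G", NULL));

  schedule = isl_schedule_node_get_schedule(node);
  isl_schedule_dump(schedule);
  isl_schedule_free(schedule);
  isl_schedule_node_free(node);
  isl_ctx_free(ctx);
  return 0;
}
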
3201
3202
/* Compute the gist of the given band node with respect to "context".
3203
 */
3204
__isl_give isl_schedule_node *isl_schedule_node_band_gist(
3205
  __isl_take isl_schedule_node *node, __isl_take isl_union_set *context)
3206
291
{
3207
291
  isl_schedule_tree *tree;
3208
291
3209
291
  tree = isl_schedule_node_get_tree(node);
3210
291
  tree = isl_schedule_tree_band_gist(tree, context);
3211
291
  return isl_schedule_node_graft_tree(node, tree);
3212
291
}
3213
3214
/* Internal data structure for isl_schedule_node_gist.
3215
 * "n_expansion" is the number of outer expansion nodes
3216
 * with respect to the current position
3217
 * "filters" contains an element for each outer filter, expansion or
3218
 * extension node with respect to the current position, each representing
3219
 * the intersection of the previous element and the filter on the filter node
3220
 * or the expansion/extension of the previous element.
3221
 * The first element is the original context passed to isl_schedule_node_gist.
3222
 */
3223
struct isl_node_gist_data {
3224
  int n_expansion;
3225
  isl_union_set_list *filters;
3226
};
3227
3228
/* Enter the expansion node "node" during an isl_schedule_node_gist traversal.
3229
 *
3230
 * In particular, add an extra element to data->filters containing
3231
 * the expansion of the previous element and replace the expansion
3232
 * and contraction on "node" by the gist with respect to these filters.
3233
 * Also keep track of the fact that we have entered another expansion.
3234
 */
3235
static __isl_give isl_schedule_node *gist_enter_expansion(
3236
  __isl_take isl_schedule_node *node, struct isl_node_gist_data *data)
3237
0
{
3238
0
  int n;
3239
0
  isl_union_set *inner;
3240
0
  isl_union_map *expansion;
3241
0
  isl_union_pw_multi_aff *contraction;
3242
0
3243
0
  data->n_expansion++;
3244
0
3245
0
  n = isl_union_set_list_n_union_set(data->filters);
3246
0
  inner = isl_union_set_list_get_union_set(data->filters, n - 1);
3247
0
  expansion = isl_schedule_node_expansion_get_expansion(node);
3248
0
  inner = isl_union_set_apply(inner, expansion);
3249
0
3250
0
  contraction = isl_schedule_node_expansion_get_contraction(node);
3251
0
  contraction = isl_union_pw_multi_aff_gist(contraction,
3252
0
            isl_union_set_copy(inner));
3253
0
3254
0
  data->filters = isl_union_set_list_add(data->filters, inner);
3255
0
3256
0
  inner = isl_union_set_list_get_union_set(data->filters, n - 1);
3257
0
  expansion = isl_schedule_node_expansion_get_expansion(node);
3258
0
  expansion = isl_union_map_gist_domain(expansion, inner);
3259
0
  node = isl_schedule_node_expansion_set_contraction_and_expansion(node,
3260
0
            contraction, expansion);
3261
0
3262
0
  return node;
3263
0
}
3264
3265
/* Leave the expansion node "node" during an isl_schedule_node_gist traversal.
3266
 *
3267
 * In particular, remove the element in data->filters that was added by
3268
 * gist_enter_expansion and decrement the number of outer expansions.
3269
 *
3270
 * The expansion has already been simplified in gist_enter_expansion.
3271
 * If this simplification results in an identity expansion, then
3272
 * it is removed here.
3273
 */
3274
static __isl_give isl_schedule_node *gist_leave_expansion(
3275
  __isl_take isl_schedule_node *node, struct isl_node_gist_data *data)
3276
0
{
3277
0
  int n;
3278
0
  isl_bool identity;
3279
0
  isl_union_map *expansion;
3280
0
3281
0
  expansion = isl_schedule_node_expansion_get_expansion(node);
3282
0
  identity = isl_union_map_is_identity(expansion);
3283
0
  isl_union_map_free(expansion);
3284
0
3285
0
  if (identity < 0)
3286
0
    node = isl_schedule_node_free(node);
3287
0
  else if (identity)
3288
0
    node = isl_schedule_node_delete(node);
3289
0
3290
0
  n = isl_union_set_list_n_union_set(data->filters);
3291
0
  data->filters = isl_union_set_list_drop(data->filters, n - 1, 1);
3292
0
3293
0
  data->n_expansion--;
3294
0
3295
0
  return node;
3296
0
}
3297
3298
/* Enter the extension node "node" during an isl_schedule_node_gist traversal.
3299
 *
3300
 * In particular, add an extra element to data->filters containing
3301
 * the union of the previous element with the additional domain elements
3302
 * introduced by the extension.
3303
 */
3304
static __isl_give isl_schedule_node *gist_enter_extension(
3305
  __isl_take isl_schedule_node *node, struct isl_node_gist_data *data)
3306
0
{
3307
0
  int n;
3308
0
  isl_union_set *inner, *extra;
3309
0
  isl_union_map *extension;
3310
0
3311
0
  n = isl_union_set_list_n_union_set(data->filters);
3312
0
  inner = isl_union_set_list_get_union_set(data->filters, n - 1);
3313
0
  extension = isl_schedule_node_extension_get_extension(node);
3314
0
  extra = isl_union_map_range(extension);
3315
0
  inner = isl_union_set_union(inner, extra);
3316
0
3317
0
  data->filters = isl_union_set_list_add(data->filters, inner);
3318
0
3319
0
  return node;
3320
0
}
3321
3322
/* Can we finish gisting at this node?
3323
 * That is, is the filter on the current filter node a subset of
3324
 * the original context passed to isl_schedule_node_gist?
3325
 * If we have gone through any expansions, then we cannot perform
3326
 * this test since the current domain elements are incomparable
3327
 * to the domain elements in the original context.
3328
 */
3329
static int gist_done(__isl_keep isl_schedule_node *node,
3330
  struct isl_node_gist_data *data)
3331
1.21k
{
3332
1.21k
  isl_union_set *filter, *outer;
3333
1.21k
  int subset;
3334
1.21k
3335
1.21k
  if (data->n_expansion != 0)
3336
0
    return 0;
3337
1.21k
3338
1.21k
  filter = isl_schedule_node_filter_get_filter(node);
3339
1.21k
  outer = isl_union_set_list_get_union_set(data->filters, 0);
3340
1.21k
  subset = isl_union_set_is_subset(filter, outer);
3341
1.21k
  isl_union_set_free(outer);
3342
1.21k
  isl_union_set_free(filter);
3343
1.21k
3344
1.21k
  return subset;
3345
1.21k
}
3346
3347
/* Callback for "traverse" to enter a node and to move
3348
 * to the deepest initial subtree that should be traversed
3349
 * by isl_schedule_node_gist.
3350
 *
3351
 * The "filters" list is extended by one element each time
3352
 * we come across a filter node by the result of intersecting
3353
 * the last element in the list with the filter on the filter node.
3354
 *
3355
 * If the filter on the current filter node is a subset of
3356
 * the original context passed to isl_schedule_node_gist,
3357
 * then there is no need to go into its subtree since it cannot
3358
 * be further simplified by the context.  The "filters" list is
3359
 * still extended for consistency, but the actual value of the
3360
 * added element is immaterial since it will not be used.
3361
 *
3362
 * Otherwise, the filter on the current filter node is replaced by
3363
 * the gist of the original filter with respect to the intersection
3364
 * of the original context with the intermediate filters.
3365
 *
3366
 * If the new element in the "filters" list is empty, then no elements
3367
 * can reach the descendants of the current filter node.  The subtree
3368
 * underneath the filter node is therefore removed.
3369
 *
3370
 * Each expansion node we come across is handled by
3371
 * gist_enter_expansion.
3372
 *
3373
 * Each extension node we come across is handled by
3374
 * gist_enter_extension.
3375
 */
3376
static __isl_give isl_schedule_node *gist_enter(
3377
  __isl_take isl_schedule_node *node, void *user)
3378
1.31k
{
3379
1.31k
  struct isl_node_gist_data *data = user;
3380
1.31k
3381
2.56k
  do {
3382
2.56k
    isl_union_set *filter, *inner;
3383
2.56k
    int done, empty;
3384
2.56k
    int n;
3385
2.56k
3386
2.56k
    switch (isl_schedule_node_get_type(node)) {
3387
2.56k
    case isl_schedule_node_error:
3388
0
      return isl_schedule_node_free(node);
3389
2.56k
    case isl_schedule_node_expansion:
3390
0
      node = gist_enter_expansion(node, data);
3391
0
      continue;
3392
2.56k
    case isl_schedule_node_extension:
3393
0
      node = gist_enter_extension(node, data);
3394
0
      continue;
3395
2.56k
    case isl_schedule_node_band:
3396
1.35k
    case isl_schedule_node_context:
3397
1.35k
    case isl_schedule_node_domain:
3398
1.35k
    case isl_schedule_node_guard:
3399
1.35k
    case isl_schedule_node_leaf:
3400
1.35k
    case isl_schedule_node_mark:
3401
1.35k
    case isl_schedule_node_sequence:
3402
1.35k
    case isl_schedule_node_set:
3403
1.35k
      continue;
3404
1.35k
    case isl_schedule_node_filter:
3405
1.21k
      break;
3406
1.21k
    }
3407
1.21k
    done = gist_done(node, data);
3408
1.21k
    filter = isl_schedule_node_filter_get_filter(node);
3409
1.21k
    if (done < 0 || done) {
3410
111
      data->filters = isl_union_set_list_add(data->filters,
3411
111
                filter);
3412
111
      if (done < 0)
3413
0
        return isl_schedule_node_free(node);
3414
111
      return node;
3415
111
    }
3416
1.10k
    n = isl_union_set_list_n_union_set(data->filters);
3417
1.10k
    inner = isl_union_set_list_get_union_set(data->filters, n - 1);
3418
1.10k
    filter = isl_union_set_gist(filter, isl_union_set_copy(inner));
3419
1.10k
    node = isl_schedule_node_filter_set_filter(node,
3420
1.10k
            isl_union_set_copy(filter));
3421
1.10k
    filter = isl_union_set_intersect(filter, inner);
3422
1.10k
    empty = isl_union_set_is_empty(filter);
3423
1.10k
    data->filters = isl_union_set_list_add(data->filters, filter);
3424
1.10k
    if (empty < 0)
3425
0
      return isl_schedule_node_free(node);
3426
1.10k
    if (!empty)
3427
660
      continue;
3428
441
    node = isl_schedule_node_child(node, 0);
3429
441
    node = isl_schedule_node_cut(node);
3430
441
    node = isl_schedule_node_parent(node);
3431
441
    return node;
3432
2.01k
  } while (isl_schedule_node_has_children(node) &&
3433
2.01k
    (node = isl_schedule_node_first_child(node)) != NULL);
3434
1.31k
3435
1.31k
  return node;
3436
1.31k
}
3437
3438
/* Callback for "traverse" to leave a node for isl_schedule_node_gist.
3439
 *
3440
 * In particular, if the current node is a filter node, then we remove
3441
 * the element on the "filters" list that was added when we entered
3442
 * the node.  There is no need to compute any gist here, since we
3443
 * already did that when we entered the node.
3444
 *
3445
 * Expansion nodes are handled by gist_leave_expansion.
3446
 *
3447
 * If the current node is an extension, then remove the element
3448
 * in data->filters that was added by gist_enter_extension.
3449
 *
3450
 * If the current node is a band node, then we compute the gist of
3451
 * the band node with respect to the intersection of the original context
3452
 * and the intermediate filters.
3453
 *
3454
 * If the current node is a sequence or set node, then some of
3455
 * the filter children may have become empty and so they are removed.
3456
 * If only one child is left, then the set or sequence node along with
3457
 * the single remaining child filter is removed.  The filter can be
3458
 * removed because the filters on a sequence or set node are supposed
3459
 * to partition the incoming domain instances.
3460
 * In principle, it should then be impossible for there to be zero
3461
 * remaining children, but should this happen, we replace the entire
3462
 * subtree with an empty filter.
3463
 */
3464
static __isl_give isl_schedule_node *gist_leave(
3465
  __isl_take isl_schedule_node *node, void *user)
3466
2.56k
{
3467
2.56k
  struct isl_node_gist_data *data = user;
3468
2.56k
  isl_schedule_tree *tree;
3469
2.56k
  int i, n;
3470
2.56k
  isl_union_set *filter;
3471
2.56k
3472
2.56k
  switch (isl_schedule_node_get_type(node)) {
3473
2.56k
  case isl_schedule_node_error:
3474
0
    return isl_schedule_node_free(node);
3475
2.56k
  case isl_schedule_node_expansion:
3476
0
    node = gist_leave_expansion(node, data);
3477
0
    break;
3478
2.56k
  case isl_schedule_node_extension:
3479
1.21k
  case isl_schedule_node_filter:
3480
1.21k
    n = isl_union_set_list_n_union_set(data->filters);
3481
1.21k
    data->filters = isl_union_set_list_drop(data->filters,
3482
1.21k
              n - 1, 1);
3483
1.21k
    break;
3484
1.21k
  case isl_schedule_node_band:
3485
291
    n = isl_union_set_list_n_union_set(data->filters);
3486
291
    filter = isl_union_set_list_get_union_set(data->filters, n - 1);
3487
291
    node = isl_schedule_node_band_gist(node, filter);
3488
291
    break;
3489
1.21k
  case isl_schedule_node_set:
3490
307
  case isl_schedule_node_sequence:
3491
307
    tree = isl_schedule_node_get_tree(node);
3492
307
    n = isl_schedule_tree_n_children(tree);
3493
1.51k
    for (i = n - 1; i >= 0; --i) {
3494
1.21k
      isl_schedule_tree *child;
3495
1.21k
      isl_union_set *filter;
3496
1.21k
      int empty;
3497
1.21k
3498
1.21k
      child = isl_schedule_tree_get_child(tree, i);
3499
1.21k
      filter = isl_schedule_tree_filter_get_filter(child);
3500
1.21k
      empty = isl_union_set_is_empty(filter);
3501
1.21k
      isl_union_set_free(filter);
3502
1.21k
      isl_schedule_tree_free(child);
3503
1.21k
      if (empty < 0)
3504
0
        tree = isl_schedule_tree_free(tree);
3505
1.21k
      else if (empty)
3506
441
        tree = isl_schedule_tree_drop_child(tree, i);
3507
1.21k
    }
3508
307
    n = isl_schedule_tree_n_children(tree);
3509
307
    node = isl_schedule_node_graft_tree(node, tree);
3510
307
    if (n == 1) {
3511
171
      node = isl_schedule_node_delete(node);
3512
171
      node = isl_schedule_node_delete(node);
3513
171
    } else if (n == 0) {
3514
0
      isl_space *space;
3515
0
3516
0
      filter =
3517
0
          isl_union_set_list_get_union_set(data->filters, 0);
3518
0
      space = isl_union_set_get_space(filter);
3519
0
      isl_union_set_free(filter);
3520
0
      filter = isl_union_set_empty(space);
3521
0
      node = isl_schedule_node_cut(node);
3522
0
      node = isl_schedule_node_insert_filter(node, filter);
3523
0
    }
3524
307
    break;
3525
759
  case isl_schedule_node_context:
3526
759
  case isl_schedule_node_domain:
3527
759
  case isl_schedule_node_guard:
3528
759
  case isl_schedule_node_leaf:
3529
759
  case isl_schedule_node_mark:
3530
759
    break;
3531
2.56k
  }
3532
2.56k
3533
2.56k
  return node;
3534
2.56k
}
3535
3536
/* Compute the gist of the subtree at "node" with respect to
3537
 * the reaching domain elements in "context".
3538
 * In particular, compute the gist of all band and filter nodes
3539
 * in the subtree with respect to "context".  Children of set or sequence
3540
 * nodes that end up with an empty filter are removed completely.
3541
 *
3542
 * We keep track of the intersection of "context" with all outer filters
3543
 * of the current node within the subtree in the final element of "filters".
3544
 * Initially, this list contains the single element "context" and it is
3545
 * extended or shortened each time we enter or leave a filter node.
3546
 */
3547
__isl_give isl_schedule_node *isl_schedule_node_gist(
3548
  __isl_take isl_schedule_node *node, __isl_take isl_union_set *context)
3549
406
{
3550
406
  struct isl_node_gist_data data;
3551
406
3552
406
  data.n_expansion = 0;
3553
406
  data.filters = isl_union_set_list_from_union_set(context);
3554
406
  node = traverse(node, &gist_enter, &gist_leave, &data);
3555
406
  isl_union_set_list_free(data.filters);
3556
406
  return node;
3557
406
}
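
A minimal sketch of a call to isl_schedule_node_gist, assuming an isl_ctx is already available; the statement names and sets are made-up examples. The subtree below a domain node is simplified with respect to a context covering only part of the domain.

#include <isl/ctx.h>
#include <isl/union_set.h>
#include <isl/schedule_node.h>

/* Gist the subtree below a freshly created domain node with respect to
 * a context that only contains the A instances (made-up example sets).
 */
static void gist_example(isl_ctx *ctx)
{
  isl_schedule_node *node;
  isl_union_set *context;

  node = isl_schedule_node_from_domain(isl_union_set_read_from_str(ctx,
    "{ A[i] : 0 <= i < 100; B[i] : 0 <= i < 100 }"));
  node = isl_schedule_node_child(node, 0);
  context = isl_union_set_read_from_str(ctx, "{ A[i] : 0 <= i < 100 }");
  /* Filters and bands in this (here trivial) subtree are simplified with
   * respect to "context"; children of set or sequence nodes whose filter
   * becomes empty would be removed.
   */
  node = isl_schedule_node_gist(node, context);
  isl_schedule_node_free(node);
}
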
3558
3559
/* Intersect the domain of domain node "node" with "domain".
3560
 *
3561
 * If the domain of "node" is already a subset of "domain",
3562
 * then nothing needs to be changed.
3563
 *
3564
 * Otherwise, we replace the domain of the domain node by the intersection
3565
 * and simplify the subtree rooted at "node" with respect to this intersection.
3566
 */
3567
__isl_give isl_schedule_node *isl_schedule_node_domain_intersect_domain(
3568
  __isl_take isl_schedule_node *node, __isl_take isl_union_set *domain)
3569
3.10k
{
3570
3.10k
  isl_schedule_tree *tree;
3571
3.10k
  isl_union_set *uset;
3572
3.10k
  int is_subset;
3573
3.10k
3574
3.10k
  if (!node || !domain)
3575
0
    goto error;
3576
3.10k
3577
3.10k
  uset = isl_schedule_tree_domain_get_domain(node->tree);
3578
3.10k
  is_subset = isl_union_set_is_subset(uset, domain);
3579
3.10k
  isl_union_set_free(uset);
3580
3.10k
  if (is_subset < 0)
3581
0
    goto error;
3582
3.10k
  if (is_subset) {
3583
2.85k
    isl_union_set_free(domain);
3584
2.85k
    return node;
3585
2.85k
  }
3586
252
3587
252
  tree = isl_schedule_tree_copy(node->tree);
3588
252
  uset = isl_schedule_tree_domain_get_domain(tree);
3589
252
  uset = isl_union_set_intersect(uset, domain);
3590
252
  tree = isl_schedule_tree_domain_set_domain(tree,
3591
252
                isl_union_set_copy(uset));
3592
252
  node = isl_schedule_node_graft_tree(node, tree);
3593
252
3594
252
  node = isl_schedule_node_child(node, 0);
3595
252
  node = isl_schedule_node_gist(node, uset);
3596
252
  node = isl_schedule_node_parent(node);
3597
252
3598
252
  return node;
3599
0
error:
3600
0
  isl_schedule_node_free(node);
3601
0
  isl_union_set_free(domain);
3602
0
  return NULL;
3603
252
}
3604
3605
/* Replace the domain of domain node "node" with the gist
3606
 * of the original domain with respect to the parameter domain "context".
3607
 */
3608
__isl_give isl_schedule_node *isl_schedule_node_domain_gist_params(
3609
  __isl_take isl_schedule_node *node, __isl_take isl_set *context)
3610
1.14k
{
3611
1.14k
  isl_union_set *domain;
3612
1.14k
  isl_schedule_tree *tree;
3613
1.14k
3614
1.14k
  if (!node || !context)
3615
0
    goto error;
3616
1.14k
3617
1.14k
  tree = isl_schedule_tree_copy(node->tree);
3618
1.14k
  domain = isl_schedule_tree_domain_get_domain(node->tree);
3619
1.14k
  domain = isl_union_set_gist_params(domain, context);
3620
1.14k
  tree = isl_schedule_tree_domain_set_domain(tree, domain);
3621
1.14k
  node = isl_schedule_node_graft_tree(node, tree);
3622
1.14k
3623
1.14k
  return node;
3624
0
error:
3625
0
  isl_schedule_node_free(node);
3626
0
  isl_set_free(context);
3627
0
  return NULL;
3628
1.14k
}
3629
3630
/* Internal data structure for isl_schedule_node_get_subtree_expansion.
3631
 * "expansions" contains a list of accumulated expansions
3632
 * for each outer expansion, set or sequence node.  The first element
3633
 * in the list is an identity mapping on the reaching domain elements.
3634
 * "res" collects the results.
3635
 */
3636
struct isl_subtree_expansion_data {
3637
  isl_union_map_list *expansions;
3638
  isl_union_map *res;
3639
};
3640
3641
/* Callback for "traverse" to enter a node and to move
3642
 * to the deepest initial subtree that should be traversed
3643
 * by isl_schedule_node_get_subtree_expansion.
3644
 *
3645
 * Whenever we come across an expansion node, the last element
3646
 * of data->expansions is combined with the expansion
3647
 * on the expansion node.
3648
 *
3649
 * Whenever we come across a filter node that is the child
3650
 * of a set or sequence node, data->expansions is extended
3651
 * with a new element that restricts the previous element
3652
 * to the elements selected by the filter.
3653
 * The previous element can then be reused while backtracking.
3654
 */
3655
static __isl_give isl_schedule_node *subtree_expansion_enter(
3656
  __isl_take isl_schedule_node *node, void *user)
3657
3
{
3658
3
  struct isl_subtree_expansion_data *data = user;
3659
3
3660
12
  do {
3661
12
    enum isl_schedule_node_type type;
3662
12
    isl_union_set *filter;
3663
12
    isl_union_map *inner, *expansion;
3664
12
    int n;
3665
12
3666
12
    switch (isl_schedule_node_get_type(node)) {
3667
12
    case isl_schedule_node_error:
3668
0
      return isl_schedule_node_free(node);
3669
12
    case isl_schedule_node_filter:
3670
4
      type = isl_schedule_node_get_parent_type(node);
3671
4
      if (type != isl_schedule_node_set &&
3672
4
          type != isl_schedule_node_sequence)
3673
0
        break;
3674
4
      filter = isl_schedule_node_filter_get_filter(node);
3675
4
      n = isl_union_map_list_n_union_map(data->expansions);
3676
4
      inner =
3677
4
          isl_union_map_list_get_union_map(data->expansions,
3678
4
                n - 1);
3679
4
      inner = isl_union_map_intersect_range(inner, filter);
3680
4
      data->expansions =
3681
4
          isl_union_map_list_add(data->expansions, inner);
3682
4
      break;
3683
4
    case isl_schedule_node_expansion:
3684
2
      n = isl_union_map_list_n_union_map(data->expansions);
3685
2
      expansion =
3686
2
        isl_schedule_node_expansion_get_expansion(node);
3687
2
      inner =
3688
2
          isl_union_map_list_get_union_map(data->expansions,
3689
2
                n - 1);
3690
2
      inner = isl_union_map_apply_range(inner, expansion);
3691
2
      data->expansions =
3692
2
          isl_union_map_list_set_union_map(data->expansions,
3693
2
                n - 1, inner);
3694
2
      break;
3695
6
    case isl_schedule_node_band:
3696
6
    case isl_schedule_node_context:
3697
6
    case isl_schedule_node_domain:
3698
6
    case isl_schedule_node_extension:
3699
6
    case isl_schedule_node_guard:
3700
6
    case isl_schedule_node_leaf:
3701
6
    case isl_schedule_node_mark:
3702
6
    case isl_schedule_node_sequence:
3703
6
    case isl_schedule_node_set:
3704
6
      break;
3705
12
    }
3706
12
  } while (isl_schedule_node_has_children(node) &&
3707
12
    (node = isl_schedule_node_first_child(node)) != NULL);
3708
3
3709
3
  return node;
3710
3
}
3711
3712
/* Callback for "traverse" to leave a node for
3713
 * isl_schedule_node_get_subtree_expansion.
3714
 *
3715
 * If we come across a filter node that is the child
3716
 * of a set or sequence node, then we remove the element
3717
 * of data->expansions that was added in subtree_expansion_enter.
3718
 *
3719
 * If we reach a leaf node, then the accumulated expansion is
3720
 * added to data->res.
3721
 */
3722
static __isl_give isl_schedule_node *subtree_expansion_leave(
3723
  __isl_take isl_schedule_node *node, void *user)
3724
12
{
3725
12
  struct isl_subtree_expansion_data *data = user;
3726
12
  int n;
3727
12
  isl_union_map *inner;
3728
12
  enum isl_schedule_node_type type;
3729
12
3730
12
  switch (isl_schedule_node_get_type(node)) {
3731
12
  case isl_schedule_node_error:
3732
0
    return isl_schedule_node_free(node);
3733
12
  case isl_schedule_node_filter:
3734
4
    type = isl_schedule_node_get_parent_type(node);
3735
4
    if (type != isl_schedule_node_set &&
3736
4
        type != isl_schedule_node_sequence)
3737
0
      break;
3738
4
    n = isl_union_map_list_n_union_map(data->expansions);
3739
4
    data->expansions = isl_union_map_list_drop(data->expansions,
3740
4
              n - 1, 1);
3741
4
    break;
3742
4
  case isl_schedule_node_leaf:
3743
3
    n = isl_union_map_list_n_union_map(data->expansions);
3744
3
    inner = isl_union_map_list_get_union_map(data->expansions,
3745
3
              n - 1);
3746
3
    data->res = isl_union_map_union(data->res, inner);
3747
3
    break;
3748
5
  case isl_schedule_node_band:
3749
5
  case isl_schedule_node_context:
3750
5
  case isl_schedule_node_domain:
3751
5
  case isl_schedule_node_expansion:
3752
5
  case isl_schedule_node_extension:
3753
5
  case isl_schedule_node_guard:
3754
5
  case isl_schedule_node_mark:
3755
5
  case isl_schedule_node_sequence:
3756
5
  case isl_schedule_node_set:
3757
5
    break;
3758
12
  }
3759
12
3760
12
  return node;
3761
12
}
3762
3763
/* Return a mapping from the domain elements that reach "node"
3764
 * to the corresponding domain elements in the leaves of the subtree
3765
 * rooted at "node" obtained by composing the intermediate expansions.
3766
 *
3767
 * We start out with an identity mapping between the domain elements
3768
 * that reach "node" and compose it with all the expansions
3769
 * on a path from "node" to a leaf while traversing the subtree.
3770
 * Within the children of a sequence or set node, the
3771
 * accumulated expansion is restricted to the elements selected
3772
 * by the filter child.
3773
 */
3774
__isl_give isl_union_map *isl_schedule_node_get_subtree_expansion(
3775
  __isl_keep isl_schedule_node *node)
3776
1
{
3777
1
  struct isl_subtree_expansion_data data;
3778
1
  isl_space *space;
3779
1
  isl_union_set *domain;
3780
1
  isl_union_map *expansion;
3781
1
3782
1
  if (!node)
3783
0
    return NULL;
3784
1
3785
1
  domain = isl_schedule_node_get_universe_domain(node);
3786
1
  space = isl_union_set_get_space(domain);
3787
1
  expansion = isl_union_set_identity(domain);
3788
1
  data.res = isl_union_map_empty(space);
3789
1
  data.expansions = isl_union_map_list_from_union_map(expansion);
3790
1
3791
1
  node = isl_schedule_node_copy(node);
3792
1
  node = traverse(node, &subtree_expansion_enter,
3793
1
      &subtree_expansion_leave, &data);
3794
1
  if (!node)
3795
0
    data.res = isl_union_map_free(data.res);
3796
1
  isl_schedule_node_free(node);
3797
1
3798
1
  isl_union_map_list_free(data.expansions);
3799
1
3800
1
  return data.res;
3801
1
}
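
For illustration, a small hypothetical helper (not part of the measured source) that prints the accumulated subtree expansion of a given node, for instance the root of a tree in which isl_schedule_node_group has introduced an expansion.

#include <isl/ctx.h>
#include <isl/union_map.h>
#include <isl/schedule_node.h>

/* Dump the mapping from the domain elements reaching "node" to the
 * corresponding domain elements in the leaves of its subtree.
 */
static void dump_subtree_expansion(__isl_keep isl_schedule_node *node)
{
  isl_union_map *expansion;

  expansion = isl_schedule_node_get_subtree_expansion(node);
  isl_union_map_dump(expansion);
  isl_union_map_free(expansion);
}
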
3802
3803
/* Internal data structure for isl_schedule_node_get_subtree_contraction.
3804
 * "contractions" contains a list of accumulated contractions
3805
 * for each outer expansion, set or sequence node.  The first element
3806
 * in the list is an identity mapping on the reaching domain elements.
3807
 * "res" collects the results.
3808
 */
3809
struct isl_subtree_contraction_data {
3810
  isl_union_pw_multi_aff_list *contractions;
3811
  isl_union_pw_multi_aff *res;
3812
};
3813
3814
/* Callback for "traverse" to enter a node and to move
3815
 * to the deepest initial subtree that should be traversed
3816
 * by isl_schedule_node_get_subtree_contraction.
3817
 *
3818
 * Whenever we come across an expansion node, the last element
3819
 * of data->contractions is combined with the contraction
3820
 * on the expansion node.
3821
 *
3822
 * Whenever we come across a filter node that is the child
3823
 * of a set or sequence node, data->contractions is extended
3824
 * with a new element that restricts the previous element
3825
 * to the elements selected by the filter.
3826
 * The previous element can then be reused while backtracking.
3827
 */
3828
static __isl_give isl_schedule_node *subtree_contraction_enter(
3829
  __isl_take isl_schedule_node *node, void *user)
3830
7
{
3831
7
  struct isl_subtree_contraction_data *data = user;
3832
7
3833
21
  do {
3834
21
    enum isl_schedule_node_type type;
3835
21
    isl_union_set *filter;
3836
21
    isl_union_pw_multi_aff *inner, *contraction;
3837
21
    int n;
3838
21
3839
21
    switch (isl_schedule_node_get_type(node)) {
3840
21
    case isl_schedule_node_error:
3841
0
      return isl_schedule_node_free(node);
3842
21
    case isl_schedule_node_filter:
3843
0
      type = isl_schedule_node_get_parent_type(node);
3844
0
      if (type != isl_schedule_node_set &&
3845
0
          type != isl_schedule_node_sequence)
3846
0
        break;
3847
0
      filter = isl_schedule_node_filter_get_filter(node);
3848
0
      n = isl_union_pw_multi_aff_list_n_union_pw_multi_aff(
3849
0
            data->contractions);
3850
0
      inner =
3851
0
          isl_union_pw_multi_aff_list_get_union_pw_multi_aff(
3852
0
            data->contractions, n - 1);
3853
0
      inner = isl_union_pw_multi_aff_intersect_domain(inner,
3854
0
                filter);
3855
0
      data->contractions =
3856
0
          isl_union_pw_multi_aff_list_add(data->contractions,
3857
0
                inner);
3858
0
      break;
3859
0
    case isl_schedule_node_expansion:
3860
0
      n = isl_union_pw_multi_aff_list_n_union_pw_multi_aff(
3861
0
            data->contractions);
3862
0
      contraction =
3863
0
          isl_schedule_node_expansion_get_contraction(node);
3864
0
      inner =
3865
0
          isl_union_pw_multi_aff_list_get_union_pw_multi_aff(
3866
0
            data->contractions, n - 1);
3867
0
      inner =
3868
0
          isl_union_pw_multi_aff_pullback_union_pw_multi_aff(
3869
0
            inner, contraction);
3870
0
      data->contractions =
3871
0
          isl_union_pw_multi_aff_list_set_union_pw_multi_aff(
3872
0
          data->contractions, n - 1, inner);
3873
0
      break;
3874
21
    case isl_schedule_node_band:
3875
21
    case isl_schedule_node_context:
3876
21
    case isl_schedule_node_domain:
3877
21
    case isl_schedule_node_extension:
3878
21
    case isl_schedule_node_guard:
3879
21
    case isl_schedule_node_leaf:
3880
21
    case isl_schedule_node_mark:
3881
21
    case isl_schedule_node_sequence:
3882
21
    case isl_schedule_node_set:
3883
21
      break;
3884
21
    }
3885
21
  } while (isl_schedule_node_has_children(node) &&
3886
21
    (node = isl_schedule_node_first_child(node)) != NULL);
3887
7
3888
7
  return node;
3889
7
}
3890
3891
/* Callback for "traverse" to leave a node for
3892
 * isl_schedule_node_get_subtree_contraction.
3893
 *
3894
 * If we come across a filter node that is the child
3895
 * of a set or sequence node, then we remove the element
3896
 * of data->contractions that was added in subtree_contraction_enter.
3897
 *
3898
 * If we reach a leaf node, then the accumulated contraction is
3899
 * added to data->res.
3900
 */
3901
static __isl_give isl_schedule_node *subtree_contraction_leave(
3902
  __isl_take isl_schedule_node *node, void *user)
3903
21
{
3904
21
  struct isl_subtree_contraction_data *data = user;
3905
21
  int n;
3906
21
  isl_union_pw_multi_aff *inner;
3907
21
  enum isl_schedule_node_type type;
3908
21
3909
21
  switch (isl_schedule_node_get_type(node)) {
3910
21
  case isl_schedule_node_error:
3911
0
    return isl_schedule_node_free(node);
3912
21
  case isl_schedule_node_filter:
3913
0
    type = isl_schedule_node_get_parent_type(node);
3914
0
    if (type != isl_schedule_node_set &&
3915
0
        type != isl_schedule_node_sequence)
3916
0
      break;
3917
0
    n = isl_union_pw_multi_aff_list_n_union_pw_multi_aff(
3918
0
            data->contractions);
3919
0
    data->contractions =
3920
0
      isl_union_pw_multi_aff_list_drop(data->contractions,
3921
0
              n - 1, 1);
3922
0
    break;
3923
7
  case isl_schedule_node_leaf:
3924
7
    n = isl_union_pw_multi_aff_list_n_union_pw_multi_aff(
3925
7
            data->contractions);
3926
7
    inner = isl_union_pw_multi_aff_list_get_union_pw_multi_aff(
3927
7
            data->contractions, n - 1);
3928
7
    data->res = isl_union_pw_multi_aff_union_add(data->res, inner);
3929
7
    break;
3930
14
  case isl_schedule_node_band:
3931
14
  case isl_schedule_node_context:
3932
14
  case isl_schedule_node_domain:
3933
14
  case isl_schedule_node_expansion:
3934
14
  case isl_schedule_node_extension:
3935
14
  case isl_schedule_node_guard:
3936
14
  case isl_schedule_node_mark:
3937
14
  case isl_schedule_node_sequence:
3938
14
  case isl_schedule_node_set:
3939
14
    break;
3940
21
  }
3941
21
3942
21
  return node;
3943
21
}
3944
3945
/* Return a mapping from the domain elements in the leaves of the subtree
3946
 * rooted at "node" to the corresponding domain elements that reach "node"
3947
 * obtained by composing the intermediate contractions.
3948
 *
3949
 * We start out with an identity mapping between the domain elements
3950
 * that reach "node" and compose it with all the contractions
3951
 * on a path from "node" to a leaf while traversing the subtree.
3952
 * Within the children of a sequence or set node, the
3953
 * accumulated contraction is restricted to the elements selected
3954
 * by the filter child.
3955
 */
3956
__isl_give isl_union_pw_multi_aff *isl_schedule_node_get_subtree_contraction(
3957
  __isl_keep isl_schedule_node *node)
3958
7
{
3959
7
  struct isl_subtree_contraction_data data;
3960
7
  isl_space *space;
3961
7
  isl_union_set *domain;
3962
7
  isl_union_pw_multi_aff *contraction;
3963
7
3964
7
  if (!node)
3965
0
    return NULL;
3966
7
3967
7
  domain = isl_schedule_node_get_universe_domain(node);
3968
7
  space = isl_union_set_get_space(domain);
3969
7
  contraction = isl_union_set_identity_union_pw_multi_aff(domain);
3970
7
  data.res = isl_union_pw_multi_aff_empty(space);
3971
7
  data.contractions =
3972
7
      isl_union_pw_multi_aff_list_from_union_pw_multi_aff(contraction);
3973
7
3974
7
  node = isl_schedule_node_copy(node);
3975
7
  node = traverse(node, &subtree_contraction_enter,
3976
7
      &subtree_contraction_leave, &data);
3977
7
  if (!node)
3978
0
    data.res = isl_union_pw_multi_aff_free(data.res);
3979
7
  isl_schedule_node_free(node);
3980
7
3981
7
  isl_union_pw_multi_aff_list_free(data.contractions);
3982
7
3983
7
  return data.res;
3984
7
}
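
Analogously, a hypothetical helper that prints the accumulated subtree contraction, i.e., the mapping from the leaf domain elements back to the domain elements that reach "node".

#include <isl/ctx.h>
#include <isl/aff.h>
#include <isl/schedule_node.h>

/* Dump the mapping from the domain elements in the leaves of the subtree
 * rooted at "node" back to the domain elements that reach "node".
 */
static void dump_subtree_contraction(__isl_keep isl_schedule_node *node)
{
  isl_union_pw_multi_aff *contraction;

  contraction = isl_schedule_node_get_subtree_contraction(node);
  isl_union_pw_multi_aff_dump(contraction);
  isl_union_pw_multi_aff_free(contraction);
}
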
3985
3986
/* Do the nearest "n" ancestors of "node" have the types given in "types"
3987
 * (starting at the parent of "node")?
3988
 */
3989
static int has_ancestors(__isl_keep isl_schedule_node *node,
3990
  int n, enum isl_schedule_node_type *types)
3991
24
{
3992
24
  int i, n_ancestor;
3993
24
3994
24
  if (!node)
3995
0
    return -1;
3996
24
3997
24
  n_ancestor = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
3998
24
  if (n_ancestor < n)
3999
0
    return 0;
4000
24
4001
24
  for (i = 0; i < n; ++i) {
4002
24
    isl_schedule_tree *tree;
4003
24
    int correct_type;
4004
24
4005
24
    tree = isl_schedule_tree_list_get_schedule_tree(node->ancestors,
4006
24
                  n_ancestor - 1 - i);
4007
24
    if (!tree)
4008
0
      return -1;
4009
24
    correct_type = isl_schedule_tree_get_type(tree) == types[i];
4010
24
    isl_schedule_tree_free(tree);
4011
24
    if (!correct_type)
4012
24
      return 0;
4013
24
  }
4014
24
4015
24
  return 1;
4016
24
}
4017
4018
/* Given a node "node" that appears in an extension (i.e., it is the child
4019
 * of a filter in a sequence inside an extension node), are the spaces
4020
 * of the extension specified by "extension" disjoint from those
4021
 * of both the original extension and the domain elements that reach
4022
 * that original extension?
4023
 */
4024
static int is_disjoint_extension(__isl_keep isl_schedule_node *node,
4025
  __isl_keep isl_union_map *extension)
4026
0
{
4027
0
  isl_union_map *old;
4028
0
  isl_union_set *domain;
4029
0
  int empty;
4030
0
4031
0
  node = isl_schedule_node_copy(node);
4032
0
  node = isl_schedule_node_parent(node);
4033
0
  node = isl_schedule_node_parent(node);
4034
0
  node = isl_schedule_node_parent(node);
4035
0
  old = isl_schedule_node_extension_get_extension(node);
4036
0
  domain = isl_schedule_node_get_universe_domain(node);
4037
0
  isl_schedule_node_free(node);
4038
0
  old = isl_union_map_universe(old);
4039
0
  domain = isl_union_set_union(domain, isl_union_map_range(old));
4040
0
  extension = isl_union_map_copy(extension);
4041
0
  extension = isl_union_map_intersect_range(extension, domain);
4042
0
  empty = isl_union_map_is_empty(extension);
4043
0
  isl_union_map_free(extension);
4044
0
4045
0
  return empty;
4046
0
}
4047
4048
/* Given a node "node" that is governed by an extension node, extend
4049
 * that extension node with "extension".
4050
 *
4051
 * In particular, "node" is the child of a filter in a sequence that
4052
 * is in turn a child of an extension node.  Extend that extension node
4053
 * with "extension".
4054
 *
4055
 * Return a pointer to the parent of the original node (i.e., a filter).
4056
 */
4057
static __isl_give isl_schedule_node *extend_extension(
4058
  __isl_take isl_schedule_node *node, __isl_take isl_union_map *extension)
4059
0
{
4060
0
  int pos;
4061
0
  int disjoint;
4062
0
  isl_union_map *node_extension;
4063
0
4064
0
  node = isl_schedule_node_parent(node);
4065
0
  pos = isl_schedule_node_get_child_position(node);
4066
0
  node = isl_schedule_node_parent(node);
4067
0
  node = isl_schedule_node_parent(node);
4068
0
  node_extension = isl_schedule_node_extension_get_extension(node);
4069
0
  disjoint = isl_union_map_is_disjoint(extension, node_extension);
4070
0
  extension = isl_union_map_union(extension, node_extension);
4071
0
  node = isl_schedule_node_extension_set_extension(node, extension);
4072
0
  node = isl_schedule_node_child(node, 0);
4073
0
  node = isl_schedule_node_child(node, pos);
4074
0
4075
0
  if (disjoint < 0)
4076
0
    return isl_schedule_node_free(node);
4077
0
  if (!node)
4078
0
    return NULL;
4079
0
  if (!disjoint)
4080
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
4081
0
      "extension domain should be disjoint from earlier "
4082
0
      "extensions", return isl_schedule_node_free(node));
4083
0
4084
0
  return node;
4085
0
}
4086
4087
/* Return the universe of "uset" if this universe is disjoint from "ref".
4088
 * Otherwise, return "uset".
4089
 *
4090
 * Also check if "uset" itself is disjoint from "ref", reporting
4091
 * an error if it is not.
4092
 */
4093
static __isl_give isl_union_set *replace_by_universe_if_disjoint(
4094
  __isl_take isl_union_set *uset, __isl_keep isl_union_set *ref)
4095
48
{
4096
48
  int disjoint;
4097
48
  isl_union_set *universe;
4098
48
4099
48
  disjoint = isl_union_set_is_disjoint(uset, ref);
4100
48
  if (disjoint < 0)
4101
0
    return isl_union_set_free(uset);
4102
48
  if (!disjoint)
4103
48
    isl_die(isl_union_set_get_ctx(uset), isl_error_invalid,
4104
48
      "extension domain should be disjoint from "
4105
48
      "current domain", return isl_union_set_free(uset));
4106
48
4107
48
  universe = isl_union_set_universe(isl_union_set_copy(uset));
4108
48
  disjoint = isl_union_set_is_disjoint(universe, ref);
4109
48
  if (disjoint >= 0 && disjoint) {
4110
48
    isl_union_set_free(uset);
4111
48
    return universe;
4112
48
  }
4113
0
  isl_union_set_free(universe);
4114
0
4115
0
  if (disjoint < 0)
4116
0
    return isl_union_set_free(uset);
4117
0
  return uset;
4118
0
}
4119
4120
/* Insert an extension node on top of "node" with extension "extension".
4121
 * In addition, insert a filter that separates node from the extension
4122
 * between the extension node and "node".
4123
 * Return a pointer to the inserted filter node.
4124
 *
4125
 * If "node" already appears in an extension (i.e., if it is the child
4126
 * of a filter in a sequence inside an extension node), then extend that
4127
 * extension with "extension" instead.
4128
 * In this case, a pointer to the original filter node is returned.
4129
 * Note that if some of the elements in the new extension live in the
4130
 * same space as those of the original extension or the domain elements
4131
 * reaching the original extension, then we insert a new extension anyway.
4132
 * Otherwise, we would have to adjust the filters in the sequence child
4133
 * of the extension to ensure that the elements in the new extension
4134
 * are filtered out.
4135
 */
4136
static __isl_give isl_schedule_node *insert_extension(
4137
  __isl_take isl_schedule_node *node, __isl_take isl_union_map *extension)
4138
24
{
4139
24
  enum isl_schedule_node_type ancestors[] =
4140
24
    { isl_schedule_node_filter, isl_schedule_node_sequence,
4141
24
      isl_schedule_node_extension };
4142
24
  isl_union_set *domain;
4143
24
  isl_union_set *filter;
4144
24
  int in_ext;
4145
24
4146
24
  in_ext = has_ancestors(node, 3, ancestors);
4147
24
  if (in_ext < 0)
4148
0
    goto error;
4149
24
  if (in_ext) {
4150
0
    int disjoint;
4151
0
4152
0
    disjoint = is_disjoint_extension(node, extension);
4153
0
    if (disjoint < 0)
4154
0
      goto error;
4155
0
    if (disjoint)
4156
0
      return extend_extension(node, extension);
4157
24
  }
4158
24
4159
24
  filter = isl_schedule_node_get_domain(node);
4160
24
  domain = isl_union_map_range(isl_union_map_copy(extension));
4161
24
  filter = replace_by_universe_if_disjoint(filter, domain);
4162
24
  isl_union_set_free(domain);
4163
24
4164
24
  node = isl_schedule_node_insert_filter(node, filter);
4165
24
  node = isl_schedule_node_insert_extension(node, extension);
4166
24
  node = isl_schedule_node_child(node, 0);
4167
24
  return node;
4168
0
error:
4169
0
  isl_schedule_node_free(node);
4170
0
  isl_union_map_free(extension);
4171
0
  return NULL;
4172
24
}
4173
4174
/* Replace the subtree that "node" points to by "tree" (which has
4175
 * a sequence root with two children), except if the parent of "node"
4176
 * is a sequence as well, in which case "tree" is spliced at the position
4177
 * of "node" in its parent.
4178
 * Return a pointer to the child of the "tree_pos" (filter) child of "tree"
4179
 * in the updated schedule tree.
4180
 */
4181
static __isl_give isl_schedule_node *graft_or_splice(
4182
  __isl_take isl_schedule_node *node, __isl_take isl_schedule_tree *tree,
4183
  int tree_pos)
4184
24
{
4185
24
  int pos;
4186
24
4187
24
  if (isl_schedule_node_get_parent_type(node) ==
4188
24
      isl_schedule_node_sequence) {
4189
0
    pos = isl_schedule_node_get_child_position(node);
4190
0
    node = isl_schedule_node_parent(node);
4191
0
    node = isl_schedule_node_sequence_splice(node, pos, tree);
4192
24
  } else {
4193
24
    pos = 0;
4194
24
    node = isl_schedule_node_graft_tree(node, tree);
4195
24
  }
4196
24
  node = isl_schedule_node_child(node, pos + tree_pos);
4197
24
  node = isl_schedule_node_child(node, 0);
4198
24
4199
24
  return node;
4200
24
}
4201
4202
/* Insert a node "graft" into the schedule tree of "node" such that it
4203
 * is executed before (if "before" is set) or after (if "before" is not set)
4204
 * the node that "node" points to.
4205
 * The root of "graft" is an extension node.
4206
 * Return a pointer to the node that "node" pointed to.
4207
 *
4208
 * We first insert an extension node on top of "node" (or extend
4209
 * the extension node if there already is one), with a filter on "node"
4210
 * separating it from the extension.
4211
 * We then insert a filter in the graft to separate it from the original
4212
 * domain elements and combine the original and new tree in a sequence.
4213
 * If we have extended an extension node, then the children of this
4214
 * sequence are spliced in the sequence of the extended extension
4215
 * at the position where "node" appears in the original extension.
4216
 * Otherwise, the sequence pair is attached to the new extension node.
4217
 */
4218
static __isl_give isl_schedule_node *graft_extension(
4219
  __isl_take isl_schedule_node *node, __isl_take isl_schedule_node *graft,
4220
  int before)
4221
24
{
4222
24
  isl_union_map *extension;
4223
24
  isl_union_set *graft_domain;
4224
24
  isl_union_set *node_domain;
4225
24
  isl_schedule_tree *tree, *tree_graft;
4226
24
4227
24
  extension = isl_schedule_node_extension_get_extension(graft);
4228
24
  graft_domain = isl_union_map_range(isl_union_map_copy(extension));
4229
24
  node_domain = isl_schedule_node_get_universe_domain(node);
4230
24
  node = insert_extension(node, extension);
4231
24
4232
24
  graft_domain = replace_by_universe_if_disjoint(graft_domain,
4233
24
              node_domain);
4234
24
  isl_union_set_free(node_domain);
4235
24
4236
24
  tree = isl_schedule_node_get_tree(node);
4237
24
  if (!isl_schedule_node_has_children(graft)) {
4238
0
    tree_graft = isl_schedule_tree_from_filter(graft_domain);
4239
24
  } else {
4240
24
    graft = isl_schedule_node_child(graft, 0);
4241
24
    tree_graft = isl_schedule_node_get_tree(graft);
4242
24
    tree_graft = isl_schedule_tree_insert_filter(tree_graft,
4243
24
                graft_domain);
4244
24
  }
4245
24
  if (before)
4246
24
    tree = isl_schedule_tree_sequence_pair(tree_graft, tree);
4247
0
  else
4248
0
    tree = isl_schedule_tree_sequence_pair(tree, tree_graft);
4249
24
  node = graft_or_splice(node, tree, before);
4250
24
4251
24
  isl_schedule_node_free(graft);
4252
24
4253
24
  return node;
4254
24
}
4255
4256
/* Replace the root domain node of "node" by an extension node suitable
4257
 * for insertion at "pos".
4258
 * That is, create an extension node that maps the outer band nodes
4259
 * at "pos" to the domain of the root node of "node" and attach
4260
 * the child of this root node to the extension node.
4261
 */
4262
static __isl_give isl_schedule_node *extension_from_domain(
4263
  __isl_take isl_schedule_node *node, __isl_keep isl_schedule_node *pos)
4264
0
{
4265
0
  isl_union_set *universe;
4266
0
  isl_union_set *domain;
4267
0
  isl_union_map *ext;
4268
0
  int depth;
4269
0
  int anchored;
4270
0
  isl_space *space;
4271
0
  isl_schedule_node *res;
4272
0
  isl_schedule_tree *tree;
4273
0
4274
0
  anchored = isl_schedule_node_is_subtree_anchored(node);
4275
0
  if (anchored < 0)
4276
0
    return isl_schedule_node_free(node);
4277
0
  if (anchored)
4278
0
    isl_die(isl_schedule_node_get_ctx(node), isl_error_unsupported,
4279
0
      "cannot graft anchored tree with domain root",
4280
0
      return isl_schedule_node_free(node));
4281
0
4282
0
  depth = isl_schedule_node_get_schedule_depth(pos);
4283
0
  domain = isl_schedule_node_domain_get_domain(node);
4284
0
  space = isl_union_set_get_space(domain);
4285
0
  space = isl_space_set_from_params(space);
4286
0
  space = isl_space_add_dims(space, isl_dim_set, depth);
4287
0
  universe = isl_union_set_from_set(isl_set_universe(space));
4288
0
  ext = isl_union_map_from_domain_and_range(universe, domain);
4289
0
  res = isl_schedule_node_from_extension(ext);
4290
0
  node = isl_schedule_node_child(node, 0);
4291
0
  if (!node)
4292
0
    return isl_schedule_node_free(res);
4293
0
  if (!isl_schedule_tree_is_leaf(node->tree)) {
4294
0
    tree = isl_schedule_node_get_tree(node);
4295
0
    res = isl_schedule_node_child(res, 0);
4296
0
    res = isl_schedule_node_graft_tree(res, tree);
4297
0
    res = isl_schedule_node_parent(res);
4298
0
  }
4299
0
  isl_schedule_node_free(node);
4300
0
4301
0
  return res;
4302
0
}
4303
4304
/* Insert a node "graft" into the schedule tree of "node" such that it
4305
 * is executed before (if "before" is set) or after (if "before" is not set)
4306
 * the node that "node" points to.
4307
 * The root of "graft" may be either a domain or an extension node.
4308
 * In the latter case, the domain of the extension needs to correspond
4309
 * to the outer band nodes of "node".
4310
 * The elements of the domain or the range of the extension may not
4311
 * intersect with the domain elements that reach "node".
4312
 * The schedule tree of "graft" may not be anchored.
4313
 *
4314
 * The schedule tree of "node" is modified to include an extension node
4315
 * corresponding to the root node of "graft" as a child of the original
4316
 * parent of "node".  The original node that "node" points to and the
4317
 * child of the root node of "graft" are attached to this extension node
4318
 * through a sequence, with appropriate filters and with the child
4319
 * of "graft" appearing before or after the original "node".
4320
 *
4321
 * If "node" already appears inside a sequence that is the child of
4322
 * an extension node and if the spaces of the new domain elements
4323
 * do not overlap with those of the original domain elements,
4324
 * then that extension node is extended with the new extension
4325
 * rather than introducing a new segment of extension and sequence nodes.
4326
 *
4327
 * Return a pointer to the same node in the modified tree that
4328
 * "node" pointed to in the original tree.
4329
 */
4330
static __isl_give isl_schedule_node *isl_schedule_node_graft_before_or_after(
4331
  __isl_take isl_schedule_node *node, __isl_take isl_schedule_node *graft,
4332
  int before)
4333
24
{
4334
24
  if (!node || !graft)
4335
0
    goto error;
4336
24
  if (check_insert(node) < 0)
4337
0
    goto error;
4338
24
4339
24
  if (isl_schedule_node_get_type(graft) == isl_schedule_node_domain)
4340
0
    graft = extension_from_domain(graft, node);
4341
24
4342
24
  if (!graft)
4343
0
    goto error;
4344
24
  if (isl_schedule_node_get_type(graft) != isl_schedule_node_extension)
4345
24
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
4346
24
      "expecting domain or extension as root of graft",
4347
24
      goto error);
4348
24
4349
24
  return graft_extension(node, graft, before);
4350
0
error:
4351
0
  isl_schedule_node_free(node);
4352
0
  isl_schedule_node_free(graft);
4353
0
  return NULL;
4354
24
}
4355
4356
/* Insert a node "graft" into the schedule tree of "node" such that it
4357
 * is executed before the node that "node" points to.
4358
 * The root of "graft" may be either a domain or an extension node.
4359
 * In the latter case, the domain of the extension needs to correspond
4360
 * to the outer band nodes of "node".
4361
 * The elements of the domain or the range of the extension may not
4362
 * intersect with the domain elements that reach "node".
4363
 * The schedule tree of "graft" may not be anchored.
4364
 *
4365
 * Return a pointer to the same node in the modified tree that
4366
 * "node" pointed to in the original tree.
4367
 */
4368
__isl_give isl_schedule_node *isl_schedule_node_graft_before(
4369
  __isl_take isl_schedule_node *node, __isl_take isl_schedule_node *graft)
4370
24
{
4371
24
  return isl_schedule_node_graft_before_or_after(node, graft, 1);
4372
24
}
4373
4374
/* Insert a node "graft" into the schedule tree of "node" such that it
4375
 * is executed after the node that "node" points to.
4376
 * The root of "graft" may be either a domain or an extension node.
4377
 * In the latter case, the domain of the extension needs to correspond
4378
 * to the outer band nodes of "node".
4379
 * The elements of the domain or the range of the extension may not
4380
 * intersect with the domain elements that reach "node".
4381
 * The schedule tree of "graft" may not be anchored.
4382
 *
4383
 * Return a pointer to the same node in the modified tree that
4384
 * "node" pointed to in the original tree.
4385
 */
4386
__isl_give isl_schedule_node *isl_schedule_node_graft_after(
4387
  __isl_take isl_schedule_node *node,
4388
  __isl_take isl_schedule_node *graft)
4389
0
{
4390
0
  return isl_schedule_node_graft_before_or_after(node, graft, 0);
4391
0
}
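A usage sketch for the two grafting entry points (not from this file; the sync[] statement is invented and the example assumes that "node" has no outer band nodes, so a zero-dimensional extension domain is appropriate):

#include <isl/union_map.h>
#include <isl/schedule_node.h>

/* Sketch: execute one extra sync[] instance after the subtree that "node"
 * points to.  Below outer bands, the left-hand side of the extension would
 * instead need one dimension per outer schedule dimension.
 */
static __isl_give isl_schedule_node *graft_sync_after(
  __isl_take isl_schedule_node *node)
{
  isl_ctx *ctx = isl_schedule_node_get_ctx(node);
  isl_union_map *ext;
  isl_schedule_node *graft;

  ext = isl_union_map_read_from_str(ctx, "{ [] -> sync[] }");
  graft = isl_schedule_node_from_extension(ext);
  return isl_schedule_node_graft_after(node, graft);
}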
4392
4393
/* Split the domain elements that reach "node" into those that satisfy
4394
 * "filter" and those that do not.  Arrange for the first subset to be
4395
 * executed before or after the second subset, depending on the value
4396
 * of "before".
4397
 * Return a pointer to the tree corresponding to the second subset,
4398
 * except when this subset is empty in which case the original pointer
4399
 * is returned.
4400
 * If both subsets are non-empty, then a sequence node is introduced
4401
 * to impose the order.  If the grandparent of the original node was
4402
 * itself a sequence, then the original child is replaced by two children
4403
 * in this sequence instead.
4404
 * The children in the sequence are copies of the original subtree,
4405
 * simplified with respect to their filters.
4406
 */
4407
static __isl_give isl_schedule_node *isl_schedule_node_order_before_or_after(
4408
  __isl_take isl_schedule_node *node, __isl_take isl_union_set *filter,
4409
  int before)
4410
0
{
4411
0
  enum isl_schedule_node_type ancestors[] =
4412
0
    { isl_schedule_node_filter, isl_schedule_node_sequence };
4413
0
  isl_union_set *node_domain, *node_filter = NULL, *parent_filter;
4414
0
  isl_schedule_node *node2;
4415
0
  isl_schedule_tree *tree1, *tree2;
4416
0
  int empty1, empty2;
4417
0
  int in_seq;
4418
0
4419
0
  if (!node || !filter)
4420
0
    goto error;
4421
0
  if (check_insert(node) < 0)
4422
0
    goto error;
4423
0
4424
0
  in_seq = has_ancestors(node, 2, ancestors);
4425
0
  if (in_seq < 0)
4426
0
    goto error;
4427
0
  node_domain = isl_schedule_node_get_domain(node);
4428
0
  filter = isl_union_set_gist(filter, isl_union_set_copy(node_domain));
4429
0
  node_filter = isl_union_set_copy(node_domain);
4430
0
  node_filter = isl_union_set_subtract(node_filter,
4431
0
            isl_union_set_copy(filter));
4432
0
  node_filter = isl_union_set_gist(node_filter, node_domain);
4433
0
  empty1 = isl_union_set_is_empty(filter);
4434
0
  empty2 = isl_union_set_is_empty(node_filter);
4435
0
  if (empty1 < 0 || empty2 < 0)
4436
0
    goto error;
4437
0
  if (empty1 || empty2) {
4438
0
    isl_union_set_free(filter);
4439
0
    isl_union_set_free(node_filter);
4440
0
    return node;
4441
0
  }
4442
0
4443
0
  if (in_seq) {
4444
0
    node = isl_schedule_node_parent(node);
4445
0
    parent_filter = isl_schedule_node_filter_get_filter(node);
4446
0
    node_filter = isl_union_set_intersect(node_filter,
4447
0
              isl_union_set_copy(parent_filter));
4448
0
    filter = isl_union_set_intersect(filter, parent_filter);
4449
0
  }
4450
0
4451
0
  node2 = isl_schedule_node_copy(node);
4452
0
  node = isl_schedule_node_gist(node, isl_union_set_copy(node_filter));
4453
0
  node2 = isl_schedule_node_gist(node2, isl_union_set_copy(filter));
4454
0
  tree1 = isl_schedule_node_get_tree(node);
4455
0
  tree2 = isl_schedule_node_get_tree(node2);
4456
0
  tree1 = isl_schedule_tree_insert_filter(tree1, node_filter);
4457
0
  tree2 = isl_schedule_tree_insert_filter(tree2, filter);
4458
0
  isl_schedule_node_free(node2);
4459
0
4460
0
  if (before) {
4461
0
    tree1 = isl_schedule_tree_sequence_pair(tree2, tree1);
4462
0
    node = graft_or_splice(node, tree1, 1);
4463
0
  } else {
4464
0
    tree1 = isl_schedule_tree_sequence_pair(tree1, tree2);
4465
0
    node = graft_or_splice(node, tree1, 0);
4466
0
  }
4467
0
4468
0
  return node;
4469
0
error:
4470
0
  isl_schedule_node_free(node);
4471
0
  isl_union_set_free(filter);
4472
0
  isl_union_set_free(node_filter);
4473
0
  return NULL;
4474
0
}
4475
4476
/* Split the domain elements that reach "node" into those that satisfy
4477
 * "filter" and those that do not.  Arrange for the first subset to be
4478
 * executed before the second subset.
4479
 * Return a pointer to the tree corresponding to the second subset,
4480
 * except when this subset is empty in which case the original pointer
4481
 * is returned.
4482
 */
4483
__isl_give isl_schedule_node *isl_schedule_node_order_before(
4484
  __isl_take isl_schedule_node *node, __isl_take isl_union_set *filter)
4485
0
{
4486
0
  return isl_schedule_node_order_before_or_after(node, filter, 1);
4487
0
}
4488
4489
/* Split the domain elements that reach "node" into those that satisfy
4490
 * "filter" and those that do not.  Arrange for the first subset to be
4491
 * executed after the second subset.
4492
 * Return a pointer to the tree corresponding to the second subset,
4493
 * except when this subset is empty in which case the original pointer
4494
 * is returned.
4495
 */
4496
__isl_give isl_schedule_node *isl_schedule_node_order_after(
4497
  __isl_take isl_schedule_node *node, __isl_take isl_union_set *filter)
4498
0
{
4499
0
  return isl_schedule_node_order_before_or_after(node, filter, 0);
4500
0
}
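A usage sketch for the ordering entry points (not from this file; the statement S is invented and is assumed to reach "node"): peel off the first instance so that it is executed before all the others.

#include <isl/union_set.h>
#include <isl/schedule_node.h>

/* Sketch: execute S[0] before the remaining instances that reach "node".
 * On return, the node points to the subtree of the complementary subset,
 * unless one of the two subsets turns out to be empty.
 */
static __isl_give isl_schedule_node *peel_first_instance(
  __isl_take isl_schedule_node *node)
{
  isl_ctx *ctx = isl_schedule_node_get_ctx(node);
  isl_union_set *filter;

  filter = isl_union_set_read_from_str(ctx, "{ S[0] }");
  return isl_schedule_node_order_before(node, filter);
}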
4501
4502
/* Reset the user pointer on all identifiers of parameters and tuples
4503
 * in the schedule node "node".
4504
 */
4505
__isl_give isl_schedule_node *isl_schedule_node_reset_user(
4506
  __isl_take isl_schedule_node *node)
4507
0
{
4508
0
  isl_schedule_tree *tree;
4509
0
4510
0
  tree = isl_schedule_node_get_tree(node);
4511
0
  tree = isl_schedule_tree_reset_user(tree);
4512
0
  node = isl_schedule_node_graft_tree(node, tree);
4513
0
4514
0
  return node;
4515
0
}
4516
4517
/* Align the parameters of the schedule node "node" to those of "space".
4518
 */
4519
__isl_give isl_schedule_node *isl_schedule_node_align_params(
4520
  __isl_take isl_schedule_node *node, __isl_take isl_space *space)
4521
0
{
4522
0
  isl_schedule_tree *tree;
4523
0
4524
0
  tree = isl_schedule_node_get_tree(node);
4525
0
  tree = isl_schedule_tree_align_params(tree, space);
4526
0
  node = isl_schedule_node_graft_tree(node, tree);
4527
0
4528
0
  return node;
4529
0
}
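A small sketch of parameter alignment (not from this file; the parameter name n is invented): the node is aligned to a parameter space containing a single parameter n.

#include <isl/space.h>
#include <isl/schedule_node.h>

/* Sketch: align the node to a parameter space with one parameter "n". */
static __isl_give isl_schedule_node *align_to_n(
  __isl_take isl_schedule_node *node)
{
  isl_ctx *ctx = isl_schedule_node_get_ctx(node);
  isl_space *space;

  space = isl_space_params_alloc(ctx, 1);
  space = isl_space_set_dim_name(space, isl_dim_param, 0, "n");
  return isl_schedule_node_align_params(node, space);
}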
4530
4531
/* Compute the pullback of schedule node "node"
4532
 * by the function represented by "upma".
4533
 * In other words, plug in "upma" in the iteration domains
4534
 * of schedule node "node".
4535
 * We currently do not handle expansion nodes.
4536
 *
4537
 * Note that this is only a helper function for
4538
 * isl_schedule_pullback_union_pw_multi_aff.  In order to maintain consistency,
4539
 * this function should not be called on a single node without also
4540
 * calling it on all the other nodes.
4541
 */
4542
__isl_give isl_schedule_node *isl_schedule_node_pullback_union_pw_multi_aff(
4543
  __isl_take isl_schedule_node *node,
4544
  __isl_take isl_union_pw_multi_aff *upma)
4545
1.06k
{
4546
1.06k
  isl_schedule_tree *tree;
4547
1.06k
4548
1.06k
  tree = isl_schedule_node_get_tree(node);
4549
1.06k
  tree = isl_schedule_tree_pullback_union_pw_multi_aff(tree, upma);
4550
1.06k
  node = isl_schedule_node_graft_tree(node, tree);
4551
1.06k
4552
1.06k
  return node;
4553
1.06k
}
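As the note above says, the per-node pullback is only meaningful when applied consistently; a sketch at the schedule level (not from this file; the tuple names A and B are invented, and isl_schedule_pullback_union_pw_multi_aff is the whole-schedule entry point mentioned above):

#include <isl/aff.h>
#include <isl/schedule.h>

/* Sketch: reexpress a schedule whose domain uses A[i] over B[i] instead;
 * the function plugged in maps the new names to the old ones.
 */
static __isl_give isl_schedule *reindex_schedule(
  __isl_take isl_schedule *schedule)
{
  isl_ctx *ctx = isl_schedule_get_ctx(schedule);
  isl_union_pw_multi_aff *upma;

  upma = isl_union_pw_multi_aff_read_from_str(ctx, "{ B[i] -> A[i] }");
  return isl_schedule_pullback_union_pw_multi_aff(schedule, upma);
}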
4554
4555
/* Internal data structure for isl_schedule_node_expand.
4556
 * "tree" is the tree that needs to be plugged in in all the leaves.
4557
 * "domain" is the set of domain elements in the original leaves
4558
 * to which the tree applies.
4559
 */
4560
struct isl_schedule_expand_data {
4561
  isl_schedule_tree *tree;
4562
  isl_union_set *domain;
4563
};
4564
4565
/* If "node" is a leaf, then plug in data->tree, simplifying it
4566
 * within its new context.
4567
 *
4568
 * If there are any domain elements at the leaf where the tree
4569
 * should not be plugged in (i.e., there are elements not in data->domain)
4570
 * then first extend the tree to only apply to the elements in data->domain
4571
 * by constructing a set node that selects data->tree for elements
4572
 * in data->domain and a leaf for the other elements.
4573
 */
4574
static __isl_give isl_schedule_node *expand(__isl_take isl_schedule_node *node,
4575
  void *user)
4576
0
{
4577
0
  struct isl_schedule_expand_data *data = user;
4578
0
  isl_schedule_tree *tree, *leaf;
4579
0
  isl_union_set *domain, *left;
4580
0
  isl_bool empty;
4581
0
4582
0
  if (isl_schedule_node_get_type(node) != isl_schedule_node_leaf)
4583
0
    return node;
4584
0
4585
0
  domain = isl_schedule_node_get_domain(node);
4586
0
  tree = isl_schedule_tree_copy(data->tree);
4587
0
4588
0
  left = isl_union_set_copy(domain);
4589
0
  left = isl_union_set_subtract(left, isl_union_set_copy(data->domain));
4590
0
  empty = isl_union_set_is_empty(left);
4591
0
  if (empty >= 0 && !empty) {
4592
0
    leaf = isl_schedule_node_get_leaf(node);
4593
0
    leaf = isl_schedule_tree_insert_filter(leaf, left);
4594
0
    left = isl_union_set_copy(data->domain);
4595
0
    tree = isl_schedule_tree_insert_filter(tree, left);
4596
0
    tree = isl_schedule_tree_set_pair(tree, leaf);
4597
0
  } else {
4598
0
    if (empty < 0)
4599
0
      node = isl_schedule_node_free(node);
4600
0
    isl_union_set_free(left);
4601
0
  }
4602
0
4603
0
  node = isl_schedule_node_graft_tree(node, tree);
4604
0
  node = isl_schedule_node_gist(node, domain);
4605
0
4606
0
  return node;
4607
0
}
4608
4609
/* Expand the tree rooted at "node" by extending all leaves
4610
 * with an expansion node with as child "tree".
4611
 * The expansion is determined by "contraction" and "domain".
4612
 * That is, the elements of "domain" are contracted according
4613
 * to "contraction".  The expansion relation is then the inverse
4614
 * of "contraction" with its range intersected with "domain".
4615
 *
4616
 * Insert the appropriate expansion node on top of "tree" and
4617
 * then plug in the result in all leaves of "node".
4618
 */
4619
__isl_give isl_schedule_node *isl_schedule_node_expand(
4620
  __isl_take isl_schedule_node *node,
4621
  __isl_take isl_union_pw_multi_aff *contraction,
4622
  __isl_take isl_union_set *domain,
4623
  __isl_take isl_schedule_tree *tree)
4624
0
{
4625
0
  struct isl_schedule_expand_data data;
4626
0
  isl_union_map *expansion;
4627
0
  isl_union_pw_multi_aff *copy;
4628
0
4629
0
  if (!node || !contraction || !tree)
4630
0
    node = isl_schedule_node_free(node);
4631
0
4632
0
  copy = isl_union_pw_multi_aff_copy(contraction);
4633
0
  expansion = isl_union_map_from_union_pw_multi_aff(copy);
4634
0
  expansion = isl_union_map_reverse(expansion);
4635
0
  expansion = isl_union_map_intersect_range(expansion, domain);
4636
0
  data.domain = isl_union_map_domain(isl_union_map_copy(expansion));
4637
0
4638
0
  tree = isl_schedule_tree_insert_expansion(tree, contraction, expansion);
4639
0
  data.tree = tree;
4640
0
4641
0
  node = isl_schedule_node_map_descendant_bottom_up(node, &expand, &data);
4642
0
  isl_union_set_free(data.domain);
4643
0
  isl_schedule_tree_free(data.tree);
4644
0
  return node;
4645
0
}
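The relation between "contraction", "domain" and the resulting expansion can be made concrete with invented tuples (G groups the S instances that share a value of i); the sketch below mirrors the computation performed above.

#include <isl/aff.h>
#include <isl/union_set.h>
#include <isl/union_map.h>

/* Sketch: from contraction { S[i,j] -> G[i] } and domain
 * { S[i,j] : 0 <= i,j < 10 }, the expansion is the reverse of the
 * contraction with its range intersected with the domain,
 * i.e. { G[i] -> S[i,j] : 0 <= i,j < 10 }.
 */
static __isl_give isl_union_map *demo_expansion(isl_ctx *ctx)
{
  isl_union_pw_multi_aff *contraction;
  isl_union_set *domain;
  isl_union_map *expansion;

  contraction = isl_union_pw_multi_aff_read_from_str(ctx,
    "{ S[i,j] -> G[i] }");
  domain = isl_union_set_read_from_str(ctx,
    "{ S[i,j] : 0 <= i < 10 and 0 <= j < 10 }");
  expansion = isl_union_map_from_union_pw_multi_aff(contraction);
  expansion = isl_union_map_reverse(expansion);
  expansion = isl_union_map_intersect_range(expansion, domain);
  return expansion;
}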
4646
4647
/* Return the position of the subtree containing "node" among the children
4648
 * of "ancestor".  "node" is assumed to be a descendant of "ancestor".
4649
 * In particular, both nodes should point to the same schedule tree.
4650
 *
4651
 * Return -1 on error.
4652
 */
4653
int isl_schedule_node_get_ancestor_child_position(
4654
  __isl_keep isl_schedule_node *node,
4655
  __isl_keep isl_schedule_node *ancestor)
4656
64.1k
{
4657
64.1k
  int n1, n2;
4658
64.1k
  isl_schedule_tree *tree;
4659
64.1k
4660
64.1k
  if (!node || !ancestor)
4661
0
    return -1;
4662
64.1k
4663
64.1k
  if (node->schedule != ancestor->schedule)
4664
64.1k
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
4665
64.1k
      "not a descendant", return -1);
4666
64.1k
4667
64.1k
  n1 = isl_schedule_node_get_tree_depth(ancestor);
4668
64.1k
  n2 = isl_schedule_node_get_tree_depth(node);
4669
64.1k
4670
64.1k
  if (n1 >= n2)
4671
64.1k
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
4672
64.1k
      "not a descendant", return -1);
4673
64.1k
  tree = isl_schedule_tree_list_get_schedule_tree(node->ancestors, n1);
4674
64.1k
  isl_schedule_tree_free(tree);
4675
64.1k
  if (tree != ancestor->tree)
4676
64.1k
    isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
4677
64.1k
      "not a descendant", return -1);
4678
64.1k
4679
64.1k
  return node->child_pos[n1];
4680
64.1k
}
4681
4682
/* Given two nodes that point to the same schedule tree, return their
4683
 * closest shared ancestor.
4684
 *
4685
 * Since the two nodes point to the same schedule, they share at least
4686
 * one ancestor, the root of the schedule.  We move down from the root
4687
 * to the first ancestor where the respective children have a different
4688
 * child position.  This is the requested ancestor.
4689
 * If there is no ancestor where the children have a different position,
4690
 * then one node is an ancestor of the other and then this node is
4691
 * the requested ancestor.
4692
 */
4693
__isl_give isl_schedule_node *isl_schedule_node_get_shared_ancestor(
4694
  __isl_keep isl_schedule_node *node1,
4695
  __isl_keep isl_schedule_node *node2)
4696
62.8k
{
4697
62.8k
  int i, n1, n2;
4698
62.8k
4699
62.8k
  if (!node1 || !node2)
4700
0
    return NULL;
4701
62.8k
  if (node1->schedule != node2->schedule)
4702
62.8k
    isl_die(isl_schedule_node_get_ctx(node1), isl_error_invalid,
4703
62.8k
      "not part of same schedule", return NULL);
4704
62.8k
  n1 = isl_schedule_node_get_tree_depth(node1);
4705
62.8k
  n2 = isl_schedule_node_get_tree_depth(node2);
4706
62.8k
  if (n2 < n1)
4707
2.48k
    return isl_schedule_node_get_shared_ancestor(node2, node1);
4708
60.3k
  if (n1 == 0)
4709
0
    return isl_schedule_node_copy(node1);
4710
60.3k
  if (isl_schedule_node_is_equal(node1, node2))
4711
28.2k
    return isl_schedule_node_copy(node1);
4712
32.0k
4713
81.0k
  for (i = 0; i < n1; ++i)
4714
81.0k
    if (node1->child_pos[i] != node2->child_pos[i])
4715
32.0k
      break;
4716
32.0k
4717
32.0k
  node1 = isl_schedule_node_copy(node1);
4718
32.0k
  return isl_schedule_node_ancestor(node1, n1 - i);
4719
32.0k
}
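A usage sketch combining the two ancestor queries above (not from this file): given two nodes of the same schedule, find their closest shared ancestor and the child position under it that leads to the first node.

#include <isl/schedule_node.h>

/* Sketch: which branch of the shared ancestor contains node1?
 * Returns -1 on error, in particular when node1 itself is the
 * shared ancestor of the two nodes.
 */
static int branch_of_node1(__isl_keep isl_schedule_node *node1,
  __isl_keep isl_schedule_node *node2)
{
  isl_schedule_node *shared;
  int pos;

  shared = isl_schedule_node_get_shared_ancestor(node1, node2);
  pos = isl_schedule_node_get_ancestor_child_position(node1, shared);
  isl_schedule_node_free(shared);
  return pos;
}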
4720
4721
/* Print "node" to "p".
4722
 */
4723
__isl_give isl_printer *isl_printer_print_schedule_node(
4724
  __isl_take isl_printer *p, __isl_keep isl_schedule_node *node)
4725
0
{
4726
0
  if (!node)
4727
0
    return isl_printer_free(p);
4728
0
  return isl_printer_print_schedule_tree_mark(p, node->schedule->root,
4729
0
      isl_schedule_tree_list_n_schedule_tree(node->ancestors),
4730
0
      node->child_pos);
4731
0
}
4732
4733
void isl_schedule_node_dump(__isl_keep isl_schedule_node *node)
4734
0
{
4735
0
  isl_ctx *ctx;
4736
0
  isl_printer *printer;
4737
0
4738
0
  if (!node)
4739
0
    return;
4740
0
4741
0
  ctx = isl_schedule_node_get_ctx(node);
4742
0
  printer = isl_printer_to_file(ctx, stderr);
4743
0
  printer = isl_printer_set_yaml_style(printer, ISL_YAML_STYLE_BLOCK);
4744
0
  printer = isl_printer_print_schedule_node(printer, node);
4745
0
4746
0
  isl_printer_free(printer);
4747
0
}
4748
4749
/* Return a string representation of "node".
4750
 * Print the schedule node in block format, since it would otherwise
4751
 * look identical to the entire schedule.
4752
 */
4753
__isl_give char *isl_schedule_node_to_str(__isl_keep isl_schedule_node *node)
4754
0
{
4755
0
  isl_printer *printer;
4756
0
  char *s;
4757
0
4758
0
  if (!node)
4759
0
    return NULL;
4760
0
4761
0
  printer = isl_printer_to_str(isl_schedule_node_get_ctx(node));
4762
0
  printer = isl_printer_set_yaml_style(printer, ISL_YAML_STYLE_BLOCK);
4763
0
  printer = isl_printer_print_schedule_node(printer, node);
4764
0
  s = isl_printer_get_str(printer);
4765
0
  isl_printer_free(printer);
4766
0
4767
0
  return s;
4768
0
}
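Finally, a sketch of the printing helpers (not from this file; the one-statement schedule text is invented): read a schedule, take its root node and turn it into a YAML block string.

#include <stdio.h>
#include <stdlib.h>
#include <isl/ctx.h>
#include <isl/schedule.h>
#include <isl/schedule_node.h>

/* Sketch: print a single schedule node in block YAML format. */
static void print_root(isl_ctx *ctx)
{
  isl_schedule *schedule;
  isl_schedule_node *node;
  char *s;

  schedule = isl_schedule_read_from_str(ctx,
    "domain: \"{ S[i] : 0 <= i < 10 }\"");
  node = isl_schedule_get_root(schedule);
  s = isl_schedule_node_to_str(node);
  printf("%s\n", s);
  free(s);
  isl_schedule_node_free(node);
  isl_schedule_free(schedule);
}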