Coverage Report

Created: 2019-07-24 05:18

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/tools/polly/lib/Support/ScopHelper.cpp
Line
Count
Source
1
//===- ScopHelper.cpp - Some Helper Functions for Scop.  ------------------===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
//
9
// Small functions that help with Scop and LLVM-IR.
10
//
11
//===----------------------------------------------------------------------===//
12
13
#include "polly/Support/ScopHelper.h"
14
#include "polly/Options.h"
15
#include "polly/ScopInfo.h"
16
#include "polly/Support/SCEVValidator.h"
17
#include "llvm/Analysis/LoopInfo.h"
18
#include "llvm/Analysis/RegionInfo.h"
19
#include "llvm/Analysis/ScalarEvolution.h"
20
#include "llvm/Analysis/ScalarEvolutionExpander.h"
21
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
22
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
23
24
using namespace llvm;
25
using namespace polly;
26
27
#define DEBUG_TYPE "polly-scop-helper"
28
29
static cl::opt<bool> PollyAllowErrorBlocks(
30
    "polly-allow-error-blocks",
31
    cl::desc("Allow to speculate on the execution of 'error blocks'."),
32
    cl::Hidden, cl::init(true), cl::ZeroOrMore, cl::cat(PollyCategory));
33
34
static cl::list<std::string> DebugFunctions(
35
    "polly-debug-func",
36
    cl::desc("Allow calls to the specified functions in SCoPs even if their "
37
             "side-effects are unknown. This can be used to do debug output in "
38
             "Polly-transformed code."),
39
    cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated, cl::cat(PollyCategory));
40
41
// Ensures that there is just one predecessor to the entry node from outside the
42
// region.
43
// The identity of the region entry node is preserved.
44
static void simplifyRegionEntry(Region *R, DominatorTree *DT, LoopInfo *LI,
45
305
                                RegionInfo *RI) {
46
305
  BasicBlock *EnteringBB = R->getEnteringBlock();
47
305
  BasicBlock *Entry = R->getEntry();
48
305
49
305
  // Before (one of):
50
305
  //
51
305
  //                       \    /            //
52
305
  //                      EnteringBB         //
53
305
  //                        |    \------>    //
54
305
  //   \   /                |                //
55
305
  //   Entry <--\         Entry <--\         //
56
305
  //   /   \    /         /   \    /         //
57
305
  //        ....               ....          //
58
305
59
305
  // Create single entry edge if the region has multiple entry edges.
60
305
  if (!EnteringBB) {
61
6
    SmallVector<BasicBlock *, 4> Preds;
62
6
    for (BasicBlock *P : predecessors(Entry))
63
12
      if (!R->contains(P))
64
12
        Preds.push_back(P);
65
6
66
6
    BasicBlock *NewEntering =
67
6
        SplitBlockPredecessors(Entry, Preds, ".region_entering", DT, LI);
68
6
69
6
    if (RI) {
70
6
      // The exit block of preceding regions must be changed to NewEntering
71
12
      for (BasicBlock *ExitPred : predecessors(NewEntering)) {
72
12
        Region *RegionOfPred = RI->getRegionFor(ExitPred);
73
12
        if (RegionOfPred->getExit() != Entry)
74
11
          continue;
75
1
76
2
        while (!RegionOfPred->isTopLevelRegion() &&
77
2
               RegionOfPred->getExit() == Entry) {
78
1
          RegionOfPred->replaceExit(NewEntering);
79
1
          RegionOfPred = RegionOfPred->getParent();
80
1
        }
81
1
      }
82
6
83
6
      // Make all ancestors use EnteringBB as entry; there might be edges to it
84
6
      Region *AncestorR = R->getParent();
85
6
      RI->setRegionFor(NewEntering, AncestorR);
86
11
      while (!AncestorR->isTopLevelRegion() && AncestorR->getEntry() == Entry) {
87
5
        AncestorR->replaceEntry(NewEntering);
88
5
        AncestorR = AncestorR->getParent();
89
5
      }
90
6
    }
91
6
92
6
    EnteringBB = NewEntering;
93
6
  }
94
305
  assert(R->getEnteringBlock() == EnteringBB);
95
305
96
305
  // After:
97
305
  //
98
305
  //    \    /       //
99
305
  //  EnteringBB     //
100
305
  //      |          //
101
305
  //      |          //
102
305
  //    Entry <--\   //
103
305
  //    /   \    /   //
104
305
  //         ....    //
105
305
}
106
107
// Ensure that the region has a single block that branches to the exit node.
108
static void simplifyRegionExit(Region *R, DominatorTree *DT, LoopInfo *LI,
109
305
                               RegionInfo *RI) {
110
305
  BasicBlock *ExitBB = R->getExit();
111
305
  BasicBlock *ExitingBB = R->getExitingBlock();
112
305
113
305
  // Before:
114
305
  //
115
305
  //   (Region)   ______/  //
116
305
  //      \  |   /         //
117
305
  //       ExitBB          //
118
305
  //       /    \          //
119
305
120
305
  if (!ExitingBB) {
121
59
    SmallVector<BasicBlock *, 4> Preds;
122
59
    for (BasicBlock *P : predecessors(ExitBB))
123
136
      if (R->contains(P))
124
120
        Preds.push_back(P);
125
59
126
59
    //  Preds[0] Preds[1]      otherBB //
127
59
    //         \  |  ________/         //
128
59
    //          \ | /                  //
129
59
    //           BB                    //
130
59
    ExitingBB =
131
59
        SplitBlockPredecessors(ExitBB, Preds, ".region_exiting", DT, LI);
132
59
    // Preds[0] Preds[1]      otherBB  //
133
59
    //        \  /           /         //
134
59
    // BB.region_exiting    /          //
135
59
    //                  \  /           //
136
59
    //                   BB            //
137
59
138
59
    if (RI)
139
59
      RI->setRegionFor(ExitingBB, R);
140
59
141
59
    // Change the exit of nested regions, but not the region itself,
142
59
    R->replaceExitRecursive(ExitingBB);
143
59
    R->replaceExit(ExitBB);
144
59
  }
145
305
  assert(ExitingBB == R->getExitingBlock());
146
305
147
305
  // After:
148
305
  //
149
305
  //     \   /                //
150
305
  //    ExitingBB     _____/  //
151
305
  //          \      /        //
152
305
  //           ExitBB         //
153
305
  //           /    \         //
154
305
}
155
156
void polly::simplifyRegion(Region *R, DominatorTree *DT, LoopInfo *LI,
157
305
                           RegionInfo *RI) {
158
305
  assert(R && !R->isTopLevelRegion());
159
305
  assert(!RI || RI == R->getRegionInfo());
160
305
  assert((!RI || DT) &&
161
305
         "RegionInfo requires DominatorTree to be updated as well");
162
305
163
305
  simplifyRegionEntry(R, DT, LI, RI);
164
305
  simplifyRegionExit(R, DT, LI, RI);
165
305
  assert(R->isSimple());
166
305
}
167
168
// Split the block into two successive blocks.
169
//
170
// Like llvm::SplitBlock, but also preserves RegionInfo
171
static BasicBlock *splitBlock(BasicBlock *Old, Instruction *SplitPt,
172
                              DominatorTree *DT, llvm::LoopInfo *LI,
173
14
                              RegionInfo *RI) {
174
14
  assert(Old && SplitPt);
175
14
176
14
  // Before:
177
14
  //
178
14
  //  \   /  //
179
14
  //   Old   //
180
14
  //  /   \  //
181
14
182
14
  BasicBlock *NewBlock = llvm::SplitBlock(Old, SplitPt, DT, LI);
183
14
184
14
  if (RI) {
185
0
    Region *R = RI->getRegionFor(Old);
186
0
    RI->setRegionFor(NewBlock, R);
187
0
  }
188
14
189
14
  // After:
190
14
  //
191
14
  //   \   /    //
192
14
  //    Old     //
193
14
  //     |      //
194
14
  //  NewBlock  //
195
14
  //   /   \    //
196
14
197
14
  return NewBlock;
198
14
}
199
200
void polly::splitEntryBlockForAlloca(BasicBlock *EntryBlock, DominatorTree *DT,
201
14
                                     LoopInfo *LI, RegionInfo *RI) {
202
14
  // Find first non-alloca instruction. Every basic block has a non-alloca
203
14
  // instruction, as every well-formed basic block has a terminator.
204
14
  BasicBlock::iterator I = EntryBlock->begin();
205
14
  while (isa<AllocaInst>(I))
206
0
    ++I;
207
14
208
14
  // splitBlock updates DT, LI and RI.
209
14
  splitBlock(EntryBlock, &*I, DT, LI, RI);
210
14
}
211
212
13
void polly::splitEntryBlockForAlloca(BasicBlock *EntryBlock, Pass *P) {
213
13
  auto *DTWP = P->getAnalysisIfAvailable<DominatorTreeWrapperPass>();
214
13
  auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
215
13
  auto *LIWP = P->getAnalysisIfAvailable<LoopInfoWrapperPass>();
216
13
  auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
217
13
  RegionInfoPass *RIP = P->getAnalysisIfAvailable<RegionInfoPass>();
218
13
  RegionInfo *RI = RIP ? &RIP->getRegionInfo() : nullptr;
219
13
220
13
  // splitBlock updates DT, LI and RI.
221
13
  polly::splitEntryBlockForAlloca(EntryBlock, DT, LI, RI);
222
13
}
223
224
/// The SCEVExpander will __not__ generate any code for an existing SDiv/SRem
225
/// instruction but just use it, if it is referenced as a SCEVUnknown. However,
226
/// we want to generate new code if the instruction is in the analyzed region
227
/// and we generate code outside/in front of that region. Hence, we generate the
228
/// code for the SDiv/SRem operands in front of the analyzed region and then
229
/// create a new SDiv/SRem operation there too.
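///
/// As an illustration with hypothetical IR (not from an actual test case):
/// assume the analyzed region contains "%q = sdiv i64 %a, %b" and a run-time
/// check placed in front of the region needs the value of %q. Reusing %q would
/// reference an instruction that does not dominate the check, so this expander
/// instead expands the operands' SCEVs in front of the region and creates a
/// fresh sdiv there, guarding the divisor with umax(%b, 1) unless it is known
/// to be non-zero (see visitUnknown below).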
230
struct ScopExpander : SCEVVisitor<ScopExpander, const SCEV *> {
231
  friend struct SCEVVisitor<ScopExpander, const SCEV *>;
232
233
  explicit ScopExpander(const Region &R, ScalarEvolution &SE,
234
                        const DataLayout &DL, const char *Name, ValueMapT *VMap,
235
                        BasicBlock *RTCBB)
236
      : Expander(SCEVExpander(SE, DL, Name)), SE(SE), Name(Name), R(R),
237
1.14k
        VMap(VMap), RTCBB(RTCBB) {}
238
239
1.14k
  Value *expandCodeFor(const SCEV *E, Type *Ty, Instruction *I) {
240
1.14k
    // If we generate code in the region we will immediately fall back to the
241
1.14k
    // SCEVExpander, otherwise we will stop at all unknowns in the SCEV and if
242
1.14k
    // needed replace them by copies computed in the entering block.
243
1.14k
    if (!R.contains(I))
244
1.14k
      E = visit(E);
245
1.14k
    return Expander.expandCodeFor(E, Ty, I);
246
1.14k
  }
247
248
3.88k
  const SCEV *visit(const SCEV *E) {
249
3.88k
    // Cache the expansion results for intermediate SCEV expressions. A SCEV
250
3.88k
    // expression can refer to an operand multiple times (e.g. "x*x"), so
251
3.88k
    // a naive visitor takes exponential time.
252
3.88k
    if (SCEVCache.count(E))
253
55
      return SCEVCache[E];
254
3.83k
    const SCEV *Result = SCEVVisitor::visit(E);
255
3.83k
    SCEVCache[E] = Result;
256
3.83k
    return Result;
257
3.83k
  }
258
259
private:
260
  SCEVExpander Expander;
261
  ScalarEvolution &SE;
262
  const char *Name;
263
  const Region &R;
264
  ValueMapT *VMap;
265
  BasicBlock *RTCBB;
266
  DenseMap<const SCEV *, const SCEV *> SCEVCache;
267
268
  const SCEV *visitGenericInst(const SCEVUnknown *E, Instruction *Inst,
269
1.53k
                               Instruction *IP) {
270
1.53k
    if (!Inst || !R.contains(Inst))
271
1.53k
      return E;
272
2
273
2
    assert(!Inst->mayThrow() && !Inst->mayReadOrWriteMemory() &&
274
2
           !isa<PHINode>(Inst));
275
2
276
2
    auto *InstClone = Inst->clone();
277
2
    for (auto &Op : Inst->operands()) {
278
2
      assert(SE.isSCEVable(Op->getType()));
279
2
      auto *OpSCEV = SE.getSCEV(Op);
280
2
      auto *OpClone = expandCodeFor(OpSCEV, Op->getType(), IP);
281
2
      InstClone->replaceUsesOfWith(Op, OpClone);
282
2
    }
283
2
284
2
    InstClone->setName(Name + Inst->getName());
285
2
    InstClone->insertBefore(IP);
286
2
    return SE.getSCEV(InstClone);
287
2
  }
288
289
1.61k
  const SCEV *visitUnknown(const SCEVUnknown *E) {
290
1.61k
291
1.61k
    // If a value mapping was given, check whether the underlying value is remapped.
292
1.61k
    Value *NewVal = VMap ? VMap->lookup(E->getValue()) : nullptr;
293
1.61k
    if (NewVal) {
294
85
      auto *NewE = SE.getSCEV(NewVal);
295
85
296
85
      // While the mapped value might be different, the SCEV representation
297
85
      // might not be. To this end, we check this before going into recursion here.
298
85
      if (E != NewE)
299
82
        return visit(NewE);
300
1.53k
    }
301
1.53k
302
1.53k
    Instruction *Inst = dyn_cast<Instruction>(E->getValue());
303
1.53k
    Instruction *IP;
304
1.53k
    if (Inst && !R.contains(Inst))
305
652
      IP = Inst;
306
885
    else if (Inst && RTCBB->getParent() == Inst->getFunction())
307
2
      IP = RTCBB->getTerminator();
308
883
    else
309
883
      IP = RTCBB->getParent()->getEntryBlock().getTerminator();
310
1.53k
311
1.53k
    if (!Inst || (Inst->getOpcode() != Instruction::SRem &&
312
654
                  Inst->getOpcode() != Instruction::SDiv))
313
1.53k
      return visitGenericInst(E, Inst, IP);
314
1
315
1
    const SCEV *LHSScev = SE.getSCEV(Inst->getOperand(0));
316
1
    const SCEV *RHSScev = SE.getSCEV(Inst->getOperand(1));
317
1
318
1
    if (!SE.isKnownNonZero(RHSScev))
319
1
      RHSScev = SE.getUMaxExpr(RHSScev, SE.getConstant(E->getType(), 1));
320
1
321
1
    Value *LHS = expandCodeFor(LHSScev, E->getType(), IP);
322
1
    Value *RHS = expandCodeFor(RHSScev, E->getType(), IP);
323
1
324
1
    Inst = BinaryOperator::Create((Instruction::BinaryOps)Inst->getOpcode(),
325
1
                                  LHS, RHS, Inst->getName() + Name, IP);
326
1
    return SE.getSCEV(Inst);
327
1
  }
328
329
  /// The following functions will just traverse the SCEV and rebuild it with
330
  /// the new operands returned by the traversal.
331
  ///
332
  ///{
333
935
  const SCEV *visitConstant(const SCEVConstant *E) { return E; }
334
74
  const SCEV *visitTruncateExpr(const SCEVTruncateExpr *E) {
335
74
    return SE.getTruncateExpr(visit(E->getOperand()), E->getType());
336
74
  }
337
22
  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *E) {
338
22
    return SE.getZeroExtendExpr(visit(E->getOperand()), E->getType());
339
22
  }
340
36
  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *E) {
341
36
    return SE.getSignExtendExpr(visit(E->getOperand()), E->getType());
342
36
  }
343
13
  const SCEV *visitUDivExpr(const SCEVUDivExpr *E) {
344
13
    auto *RHSScev = visit(E->getRHS());
345
13
    if (!SE.isKnownNonZero(RHSScev))
346
8
      RHSScev = SE.getUMaxExpr(RHSScev, SE.getConstant(E->getType(), 1));
347
13
    return SE.getUDivExpr(visit(E->getLHS()), RHSScev);
348
13
  }
349
517
  const SCEV *visitAddExpr(const SCEVAddExpr *E) {
350
517
    SmallVector<const SCEV *, 4> NewOps;
351
517
    for (const SCEV *Op : E->operands())
352
1.23k
      NewOps.push_back(visit(Op));
353
517
    return SE.getAddExpr(NewOps);
354
517
  }
355
574
  const SCEV *visitMulExpr(const SCEVMulExpr *E) {
356
574
    SmallVector<const SCEV *, 4> NewOps;
357
574
    for (const SCEV *Op : E->operands())
358
1.18k
      NewOps.push_back(visit(Op));
359
574
    return SE.getMulExpr(NewOps);
360
574
  }
361
1
  const SCEV *visitUMaxExpr(const SCEVUMaxExpr *E) {
362
1
    SmallVector<const SCEV *, 4> NewOps;
363
1
    for (const SCEV *Op : E->operands())
364
2
      NewOps.push_back(visit(Op));
365
1
    return SE.getUMaxExpr(NewOps);
366
1
  }
367
0
  const SCEV *visitSMaxExpr(const SCEVSMaxExpr *E) {
368
0
    SmallVector<const SCEV *, 4> NewOps;
369
0
    for (const SCEV *Op : E->operands())
370
0
      NewOps.push_back(visit(Op));
371
0
    return SE.getSMaxExpr(NewOps);
372
0
  }
373
1
  const SCEV *visitUMinExpr(const SCEVUMinExpr *E) {
374
1
    SmallVector<const SCEV *, 4> NewOps;
375
1
    for (const SCEV *Op : E->operands())
376
2
      NewOps.push_back(visit(Op));
377
1
    return SE.getUMinExpr(NewOps);
378
1
  }
379
0
  const SCEV *visitSMinExpr(const SCEVSMinExpr *E) {
380
0
    SmallVector<const SCEV *, 4> NewOps;
381
0
    for (const SCEV *Op : E->operands())
382
0
      NewOps.push_back(visit(Op));
383
0
    return SE.getSMinExpr(NewOps);
384
0
  }
385
38
  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *E) {
386
38
    SmallVector<const SCEV *, 4> NewOps;
387
38
    for (const SCEV *Op : E->operands())
388
76
      NewOps.push_back(visit(Op));
389
38
    return SE.getAddRecExpr(NewOps, E->getLoop(), E->getNoWrapFlags());
390
38
  }
391
  ///}
392
};
393
394
Value *polly::expandCodeFor(Scop &S, ScalarEvolution &SE, const DataLayout &DL,
395
                            const char *Name, const SCEV *E, Type *Ty,
396
                            Instruction *IP, ValueMapT *VMap,
397
1.14k
                            BasicBlock *RTCBB) {
398
1.14k
  ScopExpander Expander(S.getRegion(), SE, DL, Name, VMap, RTCBB);
399
1.14k
  return Expander.expandCodeFor(E, Ty, IP);
400
1.14k
}
401
402
bool polly::isErrorBlock(BasicBlock &BB, const Region &R, LoopInfo &LI,
403
105k
                         const DominatorTree &DT) {
404
105k
  if (!PollyAllowErrorBlocks)
405
0
    return false;
406
105k
407
105k
  if (isa<UnreachableInst>(BB.getTerminator()))
408
0
    return true;
409
105k
410
105k
  if (LI.isLoopHeader(&BB))
411
40.6k
    return false;
412
64.7k
413
64.7k
  // Basic blocks that are always executed are not considered error blocks,
414
64.7k
  // as their execution cannot be a rare event.
415
64.7k
  bool DominatesAllPredecessors = true;
416
64.7k
  if (R.isTopLevelRegion()) {
417
117
    for (BasicBlock &I : *R.getEntry()->getParent())
418
634
      if (isa<ReturnInst>(I.getTerminator()) && !DT.dominates(&BB, &I))
419
14
        DominatesAllPredecessors = false;
420
64.6k
  } else {
421
64.6k
    for (auto Pred : predecessors(R.getExit()))
422
98.8k
      if (R.contains(Pred) && !DT.dominates(&BB, Pred))
423
47.5k
        DominatesAllPredecessors = false;
424
64.6k
  }
425
64.7k
426
64.7k
  if (DominatesAllPredecessors)
427
22.9k
    return false;
428
41.8k
429
41.8k
  for (Instruction &Inst : BB)
430
372k
    if (CallInst *CI = dyn_cast<CallInst>(&Inst)) {
431
1.20k
      if (isDebugCall(CI))
432
8
        continue;
433
1.19k
434
1.19k
      if (isIgnoredIntrinsic(CI))
435
249
        continue;
436
946
437
946
      // memset, memcpy and memmove are modeled intrinsics.
438
946
      if (isa<MemSetInst>(CI) || isa<MemTransferInst>(CI))
439
129
        continue;
440
817
441
817
      if (!CI->doesNotAccessMemory())
442
348
        return true;
443
469
      if (CI->doesNotReturn())
444
0
        return true;
445
469
    }
446
41.8k
447
41.8k
  return false;
448
41.8k
}
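
// A minimal usage sketch (hypothetical helper, not from ScopHelper.cpp) that
// relies only on polly::isErrorBlock above and the analyses this file already
// uses: count how many blocks of a region are classified as error blocks.
static unsigned countErrorBlocks(Region &R, LoopInfo &LI,
                                 const DominatorTree &DT) {
  unsigned NumErrorBlocks = 0;
  for (BasicBlock *BB : R.blocks())
    if (polly::isErrorBlock(*BB, R, LI, DT))
      ++NumErrorBlocks; // a block whose execution Polly may speculate away
  return NumErrorBlocks;
}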
449
450
32.3k
Value *polly::getConditionFromTerminator(Instruction *TI) {
451
32.3k
  if (BranchInst *BR = dyn_cast<BranchInst>(TI)) {
452
32.3k
    if (BR->isUnconditional())
453
14.7k
      return ConstantInt::getTrue(Type::getInt1Ty(TI->getContext()));
454
17.5k
455
17.5k
    return BR->getCondition();
456
17.5k
  }
457
56
458
56
  if (SwitchInst *SI = dyn_cast<SwitchInst>(TI))
459
56
    return SI->getCondition();
460
0
461
0
  return nullptr;
462
0
}
463
464
3.54k
Loop *polly::getLoopSurroundingScop(Scop &S, LoopInfo &LI) {
465
3.54k
  // Start with the smallest loop containing the entry and expand that
466
3.54k
  // loop until it contains all blocks in the region. If there is a loop
467
3.54k
  // containing all blocks in the region, check if it is itself contained
468
3.54k
  // and, if so, take the parent loop, as it will be the smallest containing
469
3.54k
  // the region but not contained by it.
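  // For illustration (hypothetical loop nest): if the scop covers the body of
  // an inner loop j nested in an outer loop i, the walk starts at j; if the
  // scop also spans blocks of i, it climbs to i. Should the scop contain all
  // of i as well, the parent of i (possibly nullptr) is returned, because the
  // surrounding loop must not itself be contained in the scop.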
470
3.54k
  Loop *L = LI.getLoopFor(S.getEntry());
471
4.67k
  while (L) {
472
3.12k
    bool AllContained = true;
473
3.12k
    for (auto *BB : S.blocks())
474
25.1k
      AllContained &= L->contains(BB);
475
3.12k
    if (AllContained)
476
1.99k
      break;
477
1.12k
    L = L->getParentLoop();
478
1.12k
  }
479
3.54k
480
3.54k
  return L ? (S.contains(L) ? L->getParentLoop() : L) : nullptr;
481
3.54k
}
482
483
5.29k
unsigned polly::getNumBlocksInLoop(Loop *L) {
484
5.29k
  unsigned NumBlocks = L->getNumBlocks();
485
5.29k
  SmallVector<BasicBlock *, 4> ExitBlocks;
486
5.29k
  L->getExitBlocks(ExitBlocks);
487
5.29k
488
5.32k
  for (auto ExitBlock : ExitBlocks) {
489
5.32k
    if (isa<UnreachableInst>(ExitBlock->getTerminator()))
490
70
      NumBlocks++;
491
5.32k
  }
492
5.29k
  return NumBlocks;
493
5.29k
}
494
495
5.55k
unsigned polly::getNumBlocksInRegionNode(RegionNode *RN) {
496
5.55k
  if (!RN->isSubRegion())
497
5.44k
    return 1;
498
106
499
106
  Region *R = RN->getNodeAs<Region>();
500
106
  return std::distance(R->block_begin(), R->block_end());
501
106
}
502
503
19.0k
Loop *polly::getRegionNodeLoop(RegionNode *RN, LoopInfo &LI) {
504
19.0k
  if (!RN->isSubRegion()) {
505
17.4k
    BasicBlock *BB = RN->getNodeAs<BasicBlock>();
506
17.4k
    Loop *L = LI.getLoopFor(BB);
507
17.4k
508
17.4k
    // Unreachable statements are not considered to belong to an LLVM loop, as
509
17.4k
    // they are not part of an actual loop in the control flow graph.
510
17.4k
    // Nevertheless, we handle certain unreachable statements that are common
511
17.4k
    // when modeling run-time bounds checks as being part of the loop to be
512
17.4k
    // able to model them and to later eliminate the run-time bounds checks.
513
17.4k
    //
514
17.4k
    // Specifically, for basic blocks that terminate in an unreachable and
515
17.4k
    // where the immediate predecessor is part of a loop, we assume these
516
17.4k
    // basic blocks belong to the loop the predecessor belongs to. This
517
17.4k
    // allows us to model the following code.
518
17.4k
    //
519
17.4k
    // for (i = 0; i < N; i++) {
520
17.4k
    //   if (i > 1024)
521
17.4k
    //     abort();            <- this abort might be translated to an
522
17.4k
    //                            unreachable
523
17.4k
    //
524
17.4k
    //   A[i] = ...
525
17.4k
    // }
526
17.4k
    if (!L && isa<UnreachableInst>(BB->getTerminator()) && BB->getPrevNode())
527
0
      L = LI.getLoopFor(BB->getPrevNode());
528
17.4k
    return L;
529
17.4k
  }
530
1.54k
531
1.54k
  Region *NonAffineSubRegion = RN->getNodeAs<Region>();
532
1.54k
  Loop *L = LI.getLoopFor(NonAffineSubRegion->getEntry());
533
2.52k
  while (L && 
NonAffineSubRegion->contains(L)1.87k
)
534
987
    L = L->getParentLoop();
535
1.54k
  return L;
536
1.54k
}
537
538
static bool hasVariantIndex(GetElementPtrInst *Gep, Loop *L, Region &R,
539
944
                            ScalarEvolution &SE) {
540
1.22k
  for (const Use &Val : llvm::drop_begin(Gep->operands(), 1)) {
541
1.22k
    const SCEV *PtrSCEV = SE.getSCEVAtScope(Val, L);
542
1.22k
    Loop *OuterLoop = R.outermostLoopInRegion(L);
543
1.22k
    if (!SE.isLoopInvariant(PtrSCEV, OuterLoop))
544
69
      return true;
545
1.22k
  }
546
944
  return false;
547
944
}
548
549
bool polly::isHoistableLoad(LoadInst *LInst, Region &R, LoopInfo &LI,
550
                            ScalarEvolution &SE, const DominatorTree &DT,
551
3.02k
                            const InvariantLoadsSetTy &KnownInvariantLoads) {
552
3.02k
  Loop *L = LI.getLoopFor(LInst->getParent());
553
3.02k
  auto *Ptr = LInst->getPointerOperand();
554
3.02k
555
3.02k
  // A LoadInst is hoistable if the address it is loading from is also
556
3.02k
  // invariant; in this case, another invariant load (whether that address
557
3.02k
  // is also not written to has to be checked separately).
558
3.02k
  // TODO: This only checks for a LoadInst->GetElementPtrInst->LoadInst
559
3.02k
  // pattern generated by the Chapel frontend, but generally this applies
560
3.02k
  // for any chain of instructions that does not also depend on any
561
3.02k
  // induction variable.
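  // Worked example with hypothetical IR names (illustrative only): given
  //   %base = load double*, double** @A       ; already a known invariant load
  //   %gep  = getelementptr double, double* %base, i64 %c ; %c loop-invariant
  //   %val  = load double, double* %gep
  // the load %val is hoistable, because its address depends only on another
  // invariant load and on indices that do not vary within the region's loops.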
562
3.02k
  if (auto *GepInst = dyn_cast<GetElementPtrInst>(Ptr)) {
563
944
    if (!hasVariantIndex(GepInst, L, R, SE)) {
564
875
      if (auto *DecidingLoad =
565
273
              dyn_cast<LoadInst>(GepInst->getPointerOperand())) {
566
273
        if (KnownInvariantLoads.count(DecidingLoad))
567
179
          return true;
568
2.84k
      }
569
875
    }
570
944
  }
571
2.84k
572
2.84k
  const SCEV *PtrSCEV = SE.getSCEVAtScope(Ptr, L);
573
3.19k
  while (L && R.contains(L)) {
574
369
    if (!SE.isLoopInvariant(PtrSCEV, L))
575
19
      return false;
576
350
    L = L->getParentLoop();
577
350
  }
578
2.84k
579
21.3k
  for (auto *User : Ptr->users()) {
580
21.3k
    auto *UserI = dyn_cast<Instruction>(User);
581
21.3k
    if (!UserI || !R.contains(UserI))
582
3.06k
      continue;
583
18.2k
    if (!UserI->mayWriteToMemory())
584
17.8k
      continue;
585
396
586
396
    auto &BB = *UserI->getParent();
587
396
    if (DT.dominates(&BB, LInst->getParent()))
588
122
      return false;
589
274
590
274
    bool DominatesAllPredecessors = true;
591
274
    if (R.isTopLevelRegion()) {
592
1
      for (BasicBlock &I : *R.getEntry()->getParent())
593
5
        if (isa<ReturnInst>(I.getTerminator()) && !DT.dominates(&BB, &I))
594
1
          DominatesAllPredecessors = false;
595
273
    } else {
596
273
      for (auto Pred : predecessors(R.getExit()))
597
532
        if (R.contains(Pred) && !DT.dominates(&BB, Pred))
598
498
          DominatesAllPredecessors = false;
599
273
    }
600
274
601
274
    if (!DominatesAllPredecessors)
602
274
      continue;
603
0
604
0
    return false;
605
0
  }
606
2.82k
607
2.82k
  return true;
608
2.82k
}
609
610
17.9k
bool polly::isIgnoredIntrinsic(const Value *V) {
611
17.9k
  if (auto *IT = dyn_cast<IntrinsicInst>(V)) {
612
622
    switch (IT->getIntrinsicID()) {
613
622
    // Lifetime markers are supported/ignored.
614
622
    case llvm::Intrinsic::lifetime_start:
615
307
    case llvm::Intrinsic::lifetime_end:
616
307
    // Invariant markers are supported/ignored.
617
307
    case llvm::Intrinsic::invariant_start:
618
307
    case llvm::Intrinsic::invariant_end:
619
307
    // Some misc annotations are supported/ignored.
620
307
    case llvm::Intrinsic::var_annotation:
621
307
    case llvm::Intrinsic::ptr_annotation:
622
307
    case llvm::Intrinsic::annotation:
623
307
    case llvm::Intrinsic::donothing:
624
307
    case llvm::Intrinsic::assume:
625
307
    // Some debug info intrinsics are supported/ignored.
626
307
    case llvm::Intrinsic::dbg_value:
627
307
    case llvm::Intrinsic::dbg_declare:
628
307
      return true;
629
315
    default:
630
315
      break;
631
17.6k
    }
632
17.6k
  }
633
17.6k
  return false;
634
17.6k
}
635
636
bool polly::canSynthesize(const Value *V, const Scop &S, ScalarEvolution *SE,
637
28.3k
                          Loop *Scope) {
638
28.3k
  if (!V || !SE->isSCEVable(V->getType()))
639
4.45k
    return false;
640
23.9k
641
23.9k
  const InvariantLoadsSetTy &ILS = S.getRequiredInvariantLoads();
642
23.9k
  if (const SCEV *Scev = SE->getSCEVAtScope(const_cast<Value *>(V), Scope))
643
23.9k
    if (!isa<SCEVCouldNotCompute>(Scev))
644
23.9k
      if (!hasScalarDepsInsideRegion(Scev, &S.getRegion(), Scope, false, ILS))
645
16.3k
        return true;
646
7.58k
647
7.58k
  return false;
648
7.58k
}
649
650
19.1k
llvm::BasicBlock *polly::getUseBlock(const llvm::Use &U) {
651
19.1k
  Instruction *UI = dyn_cast<Instruction>(U.getUser());
652
19.1k
  if (!UI)
653
0
    return nullptr;
654
19.1k
655
19.1k
  if (PHINode *PHI = dyn_cast<PHINode>(UI))
656
2.06k
    return PHI->getIncomingBlock(U);
657
17.0k
658
17.0k
  return UI->getParent();
659
17.0k
}
660
661
std::tuple<std::vector<const SCEV *>, std::vector<int>>
662
2.76k
polly::getIndexExpressionsFromGEP(GetElementPtrInst *GEP, ScalarEvolution &SE) {
663
2.76k
  std::vector<const SCEV *> Subscripts;
664
2.76k
  std::vector<int> Sizes;
665
2.76k
666
2.76k
  Type *Ty = GEP->getPointerOperandType();
667
2.76k
668
2.76k
  bool DroppedFirstDim = false;
669
2.76k
670
6.23k
  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
671
3.58k
672
3.58k
    const SCEV *Expr = SE.getSCEV(GEP->getOperand(i));
673
3.58k
674
3.58k
    if (i == 1) {
675
2.76k
      if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
676
2.76k
        Ty = PtrTy->getElementType();
677
2.76k
      } else if (auto *ArrayTy = dyn_cast<ArrayType>(Ty)) {
678
0
        Ty = ArrayTy->getElementType();
679
0
      } else {
680
0
        Subscripts.clear();
681
0
        Sizes.clear();
682
0
        break;
683
0
      }
684
2.76k
      if (auto *Const = dyn_cast<SCEVConstant>(Expr))
685
816
        if (Const->getValue()->isZero()) {
686
603
          DroppedFirstDim = true;
687
603
          continue;
688
603
        }
689
2.16k
      Subscripts.push_back(Expr);
690
2.16k
      continue;
691
2.16k
    }
692
817
693
817
    auto *ArrayTy = dyn_cast<ArrayType>(Ty);
694
817
    if (!ArrayTy) {
695
113
      Subscripts.clear();
696
113
      Sizes.clear();
697
113
      break;
698
113
    }
699
704
700
704
    Subscripts.push_back(Expr);
701
704
    if (!(DroppedFirstDim && 
i == 2513
))
702
267
      Sizes.push_back(ArrayTy->getNumElements());
703
704
704
704
    Ty = ArrayTy->getElementType();
705
704
  }
706
2.76k
707
2.76k
  return std::make_tuple(Subscripts, Sizes);
708
2.76k
}
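
// Worked example with a hypothetical GEP (not from ScopHelper.cpp): for
//   getelementptr [100 x [50 x double]], [100 x [50 x double]]* %A, i64 0, i64 %i, i64 %j
// the leading zero index drops the outermost dimension, so the function above
// returns Subscripts = {%i, %j} and Sizes = {50}; the outermost size (100) is
// deliberately not recorded.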
709
710
llvm::Loop *polly::getFirstNonBoxedLoopFor(llvm::Loop *L, llvm::LoopInfo &LI,
711
14.4k
                                           const BoxedLoopsSetTy &BoxedLoops) {
712
14.5k
  while (BoxedLoops.count(L))
713
42
    L = L->getParentLoop();
714
14.4k
  return L;
715
14.4k
}
716
717
llvm::Loop *polly::getFirstNonBoxedLoopFor(llvm::BasicBlock *BB,
718
                                           llvm::LoopInfo &LI,
719
14.4k
                                           const BoxedLoopsSetTy &BoxedLoops) {
720
14.4k
  Loop *L = LI.getLoopFor(BB);
721
14.4k
  return getFirstNonBoxedLoopFor(L, LI, BoxedLoops);
722
14.4k
}
723
724
1.27k
bool polly::isDebugCall(Instruction *Inst) {
725
1.27k
  auto *CI = dyn_cast<CallInst>(Inst);
726
1.27k
  if (!CI)
727
1
    return false;
728
1.27k
729
1.27k
  Function *CF = CI->getCalledFunction();
730
1.27k
  if (!CF)
731
5
    return false;
732
1.26k
733
1.26k
  return std::find(DebugFunctions.begin(), DebugFunctions.end(),
734
1.26k
                   CF->getName()) != DebugFunctions.end();
735
1.26k
}
736
737
0
static bool hasDebugCall(BasicBlock *BB) {
738
0
  for (Instruction &Inst : *BB) {
739
0
    if (isDebugCall(&Inst))
740
0
      return true;
741
0
  }
742
0
  return false;
743
0
}
744
745
11.2k
bool polly::hasDebugCall(ScopStmt *Stmt) {
746
11.2k
  // Quick skip if no debug functions have been defined.
747
11.2k
  if (DebugFunctions.empty())
748
11.2k
    return false;
749
7
750
7
  if (!Stmt)
751
0
    return false;
752
7
753
7
  for (Instruction *Inst : Stmt->getInstructions())
754
3
    if (isDebugCall(Inst))
755
2
      return true;
756
7
757
7
  if (Stmt->isRegionStmt()) {
758
0
    for (BasicBlock *RBB : Stmt->getRegion()->blocks())
759
0
      if (RBB != Stmt->getEntryBlock() && ::hasDebugCall(RBB))
760
0
        return true;
761
0
  }
762
5
763
5
  return false;
764
5
}