Coverage Report

Created: 2019-04-21 11:35

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/tools/polly/lib/Support/ScopHelper.cpp
Line
Count
Source (jump to first uncovered line)
1
//===- ScopHelper.cpp - Some Helper Functions for Scop.  ------------------===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
//
9
// Small functions that help with Scop and LLVM-IR.
10
//
11
//===----------------------------------------------------------------------===//
12
13
#include "polly/Support/ScopHelper.h"
14
#include "polly/Options.h"
15
#include "polly/ScopInfo.h"
16
#include "polly/Support/SCEVValidator.h"
17
#include "llvm/Analysis/LoopInfo.h"
18
#include "llvm/Analysis/RegionInfo.h"
19
#include "llvm/Analysis/ScalarEvolution.h"
20
#include "llvm/Analysis/ScalarEvolutionExpander.h"
21
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
22
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
23
24
using namespace llvm;
25
using namespace polly;
26
27
#define DEBUG_TYPE "polly-scop-helper"
28
29
static cl::opt<bool> PollyAllowErrorBlocks(
30
    "polly-allow-error-blocks",
31
    cl::desc("Allow to speculate on the execution of 'error blocks'."),
32
    cl::Hidden, cl::init(true), cl::ZeroOrMore, cl::cat(PollyCategory));
33
34
static cl::list<std::string> DebugFunctions(
35
    "polly-debug-func",
36
    cl::desc("Allow calls to the specified functions in SCoPs even if their "
37
             "side-effects are unknown. This can be used to do debug output in "
38
             "Polly-transformed code."),
39
    cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated, cl::cat(PollyCategory));
40
41
// Ensures that there is just one predecessor to the entry node from outside the
42
// region.
43
// The identity of the region entry node is preserved.
44
static void simplifyRegionEntry(Region *R, DominatorTree *DT, LoopInfo *LI,
45
89
                                RegionInfo *RI) {
46
89
  BasicBlock *EnteringBB = R->getEnteringBlock();
47
89
  BasicBlock *Entry = R->getEntry();
48
89
49
89
  // Before (one of):
50
89
  //
51
89
  //                       \    /            //
52
89
  //                      EnteringBB         //
53
89
  //                        |    \------>    //
54
89
  //   \   /                |                //
55
89
  //   Entry <--\         Entry <--\         //
56
89
  //   /   \    /         /   \    /         //
57
89
  //        ....               ....          //
58
89
59
89
  // Create single entry edge if the region has multiple entry edges.
60
89
  if (!EnteringBB) {
61
2
    SmallVector<BasicBlock *, 4> Preds;
62
2
    for (BasicBlock *P : predecessors(Entry))
63
4
      if (!R->contains(P))
64
4
        Preds.push_back(P);
65
2
66
2
    BasicBlock *NewEntering =
67
2
        SplitBlockPredecessors(Entry, Preds, ".region_entering", DT, LI);
68
2
69
2
    if (RI) {
70
2
      // The exit block of predecessing regions must be changed to NewEntering
71
4
      for (BasicBlock *ExitPred : predecessors(NewEntering)) {
72
4
        Region *RegionOfPred = RI->getRegionFor(ExitPred);
73
4
        if (RegionOfPred->getExit() != Entry)
74
4
          continue;
75
0
76
0
        while (!RegionOfPred->isTopLevelRegion() &&
77
0
               RegionOfPred->getExit() == Entry) {
78
0
          RegionOfPred->replaceExit(NewEntering);
79
0
          RegionOfPred = RegionOfPred->getParent();
80
0
        }
81
0
      }
82
2
83
2
      // Make all ancestors use EnteringBB as entry; there might be edges to it
84
2
      Region *AncestorR = R->getParent();
85
2
      RI->setRegionFor(NewEntering, AncestorR);
86
4
      while (!AncestorR->isTopLevelRegion() && 
AncestorR->getEntry() == Entry3
) {
87
2
        AncestorR->replaceEntry(NewEntering);
88
2
        AncestorR = AncestorR->getParent();
89
2
      }
90
2
    }
91
2
92
2
    EnteringBB = NewEntering;
93
2
  }
94
89
  assert(R->getEnteringBlock() == EnteringBB);
95
89
96
89
  // After:
97
89
  //
98
89
  //    \    /       //
99
89
  //  EnteringBB     //
100
89
  //      |          //
101
89
  //      |          //
102
89
  //    Entry <--\   //
103
89
  //    /   \    /   //
104
89
  //         ....    //
105
89
}
106
107
// Ensure that the region has a single block that branches to the exit node.
108
static void simplifyRegionExit(Region *R, DominatorTree *DT, LoopInfo *LI,
109
89
                               RegionInfo *RI) {
110
89
  BasicBlock *ExitBB = R->getExit();
111
89
  BasicBlock *ExitingBB = R->getExitingBlock();
112
89
113
89
  // Before:
114
89
  //
115
89
  //   (Region)   ______/  //
116
89
  //      \  |   /         //
117
89
  //       ExitBB          //
118
89
  //       /    \          //
119
89
120
89
  if (!ExitingBB) {
121
17
    SmallVector<BasicBlock *, 4> Preds;
122
17
    for (BasicBlock *P : predecessors(ExitBB))
123
40
      if (R->contains(P))
124
35
        Preds.push_back(P);
125
17
126
17
    //  Preds[0] Preds[1]      otherBB //
127
17
    //         \  |  ________/         //
128
17
    //          \ | /                  //
129
17
    //           BB                    //
130
17
    ExitingBB =
131
17
        SplitBlockPredecessors(ExitBB, Preds, ".region_exiting", DT, LI);
132
17
    // Preds[0] Preds[1]      otherBB  //
133
17
    //        \  /           /         //
134
17
    // BB.region_exiting    /          //
135
17
    //                  \  /           //
136
17
    //                   BB            //
137
17
138
17
    if (RI)
139
17
      RI->setRegionFor(ExitingBB, R);
140
17
141
17
    // Change the exit of nested regions, but not the region itself,
142
17
    R->replaceExitRecursive(ExitingBB);
143
17
    R->replaceExit(ExitBB);
144
17
  }
145
89
  assert(ExitingBB == R->getExitingBlock());
146
89
147
89
  // After:
148
89
  //
149
89
  //     \   /                //
150
89
  //    ExitingBB     _____/  //
151
89
  //          \      /        //
152
89
  //           ExitBB         //
153
89
  //           /    \         //
154
89
}
155
156
void polly::simplifyRegion(Region *R, DominatorTree *DT, LoopInfo *LI,
157
89
                           RegionInfo *RI) {
158
89
  assert(R && !R->isTopLevelRegion());
159
89
  assert(!RI || RI == R->getRegionInfo());
160
89
  assert((!RI || DT) &&
161
89
         "RegionInfo requires DominatorTree to be updated as well");
162
89
163
89
  simplifyRegionEntry(R, DT, LI, RI);
164
89
  simplifyRegionExit(R, DT, LI, RI);
165
89
  assert(R->isSimple());
166
89
}
167
168
// Split the block into two successive blocks.
169
//
170
// Like llvm::SplitBlock, but also preserves RegionInfo
171
static BasicBlock *splitBlock(BasicBlock *Old, Instruction *SplitPt,
172
                              DominatorTree *DT, llvm::LoopInfo *LI,
173
4
                              RegionInfo *RI) {
174
4
  assert(Old && SplitPt);
175
4
176
4
  // Before:
177
4
  //
178
4
  //  \   /  //
179
4
  //   Old   //
180
4
  //  /   \  //
181
4
182
4
  BasicBlock *NewBlock = llvm::SplitBlock(Old, SplitPt, DT, LI);
183
4
184
4
  if (RI) {
185
0
    Region *R = RI->getRegionFor(Old);
186
0
    RI->setRegionFor(NewBlock, R);
187
0
  }
188
4
189
4
  // After:
190
4
  //
191
4
  //   \   /    //
192
4
  //    Old     //
193
4
  //     |      //
194
4
  //  NewBlock  //
195
4
  //   /   \    //
196
4
197
4
  return NewBlock;
198
4
}
199
200
void polly::splitEntryBlockForAlloca(BasicBlock *EntryBlock, DominatorTree *DT,
201
4
                                     LoopInfo *LI, RegionInfo *RI) {
202
4
  // Find first non-alloca instruction. Every basic block has a non-alloca
203
4
  // instruction, as every well formed basic block has a terminator.
204
4
  BasicBlock::iterator I = EntryBlock->begin();
205
4
  while (isa<AllocaInst>(I))
206
0
    ++I;
207
4
208
4
  // splitBlock updates DT, LI and RI.
209
4
  splitBlock(EntryBlock, &*I, DT, LI, RI);
210
4
}
211
212
4
void polly::splitEntryBlockForAlloca(BasicBlock *EntryBlock, Pass *P) {
213
4
  auto *DTWP = P->getAnalysisIfAvailable<DominatorTreeWrapperPass>();
214
4
  auto *DT = DTWP ? &DTWP->getDomTree() : 
nullptr0
;
215
4
  auto *LIWP = P->getAnalysisIfAvailable<LoopInfoWrapperPass>();
216
4
  auto *LI = LIWP ? &LIWP->getLoopInfo() : 
nullptr0
;
217
4
  RegionInfoPass *RIP = P->getAnalysisIfAvailable<RegionInfoPass>();
218
4
  RegionInfo *RI = RIP ? 
&RIP->getRegionInfo()0
: nullptr;
219
4
220
4
  // splitBlock updates DT, LI and RI.
221
4
  polly::splitEntryBlockForAlloca(EntryBlock, DT, LI, RI);
222
4
}
223
224
/// The SCEVExpander will __not__ generate any code for an existing SDiv/SRem
225
/// instruction but just use it, if it is referenced as a SCEVUnknown. We want
226
/// however to generate new code if the instruction is in the analyzed region
227
/// and we generate code outside/in front of that region. Hence, we generate the
228
/// code for the SDiv/SRem operands in front of the analyzed region and then
229
/// create a new SDiv/SRem operation there too.
230
struct ScopExpander : SCEVVisitor<ScopExpander, const SCEV *> {
231
  friend struct SCEVVisitor<ScopExpander, const SCEV *>;
232
233
  explicit ScopExpander(const Region &R, ScalarEvolution &SE,
234
                        const DataLayout &DL, const char *Name, ValueMapT *VMap,
235
                        BasicBlock *RTCBB)
236
      : Expander(SCEVExpander(SE, DL, Name)), SE(SE), Name(Name), R(R),
237
317
        VMap(VMap), RTCBB(RTCBB) {}
238
239
317
  Value *expandCodeFor(const SCEV *E, Type *Ty, Instruction *I) {
240
317
    // If we generate code in the region we will immediately fall back to the
241
317
    // SCEVExpander, otherwise we will stop at all unknowns in the SCEV and if
242
317
    // needed replace them by copies computed in the entering block.
243
317
    if (!R.contains(I))
244
317
      E = visit(E);
245
317
    return Expander.expandCodeFor(E, Ty, I);
246
317
  }
247
248
1.10k
  const SCEV *visit(const SCEV *E) {
249
1.10k
    // Cache the expansion results for intermediate SCEV expressions. A SCEV
250
1.10k
    // expression can refer to an operand multiple times (e.g. "x*x), so
251
1.10k
    // a naive visitor takes exponential time.
252
1.10k
    if (SCEVCache.count(E))
253
16
      return SCEVCache[E];
254
1.08k
    const SCEV *Result = SCEVVisitor::visit(E);
255
1.08k
    SCEVCache[E] = Result;
256
1.08k
    return Result;
257
1.08k
  }
258
259
private:
260
  SCEVExpander Expander;
261
  ScalarEvolution &SE;
262
  const char *Name;
263
  const Region &R;
264
  ValueMapT *VMap;
265
  BasicBlock *RTCBB;
266
  DenseMap<const SCEV *, const SCEV *> SCEVCache;
267
268
  const SCEV *visitGenericInst(const SCEVUnknown *E, Instruction *Inst,
269
459
                               Instruction *IP) {
270
459
    if (!Inst || 
!R.contains(Inst)182
)
271
459
      return E;
272
0
273
0
    assert(!Inst->mayThrow() && !Inst->mayReadOrWriteMemory() &&
274
0
           !isa<PHINode>(Inst));
275
0
276
0
    auto *InstClone = Inst->clone();
277
0
    for (auto &Op : Inst->operands()) {
278
0
      assert(SE.isSCEVable(Op->getType()));
279
0
      auto *OpSCEV = SE.getSCEV(Op);
280
0
      auto *OpClone = expandCodeFor(OpSCEV, Op->getType(), IP);
281
0
      InstClone->replaceUsesOfWith(Op, OpClone);
282
0
    }
283
0
284
0
    InstClone->setName(Name + Inst->getName());
285
0
    InstClone->insertBefore(IP);
286
0
    return SE.getSCEV(InstClone);
287
0
  }
288
289
474
  const SCEV *visitUnknown(const SCEVUnknown *E) {
290
474
291
474
    // If a value mapping was given try if the underlying value is remapped.
292
474
    Value *NewVal = VMap ? 
VMap->lookup(E->getValue())472
:
nullptr2
;
293
474
    if (NewVal) {
294
17
      auto *NewE = SE.getSCEV(NewVal);
295
17
296
17
      // While the mapped value might be different the SCEV representation might
297
17
      // not be. To this end we will check before we go into recursion here.
298
17
      if (E != NewE)
299
15
        return visit(NewE);
300
459
    }
301
459
302
459
    Instruction *Inst = dyn_cast<Instruction>(E->getValue());
303
459
    Instruction *IP;
304
459
    if (Inst && 
!R.contains(Inst)182
)
305
182
      IP = Inst;
306
277
    else if (Inst && 
RTCBB->getParent() == Inst->getFunction()0
)
307
0
      IP = RTCBB->getTerminator();
308
277
    else
309
277
      IP = RTCBB->getParent()->getEntryBlock().getTerminator();
310
459
311
459
    if (!Inst || 
(182
Inst->getOpcode() != Instruction::SRem182
&&
312
182
                  Inst->getOpcode() != Instruction::SDiv))
313
459
      return visitGenericInst(E, Inst, IP);
314
0
315
0
    const SCEV *LHSScev = SE.getSCEV(Inst->getOperand(0));
316
0
    const SCEV *RHSScev = SE.getSCEV(Inst->getOperand(1));
317
0
318
0
    if (!SE.isKnownNonZero(RHSScev))
319
0
      RHSScev = SE.getUMaxExpr(RHSScev, SE.getConstant(E->getType(), 1));
320
0
321
0
    Value *LHS = expandCodeFor(LHSScev, E->getType(), IP);
322
0
    Value *RHS = expandCodeFor(RHSScev, E->getType(), IP);
323
0
324
0
    Inst = BinaryOperator::Create((Instruction::BinaryOps)Inst->getOpcode(),
325
0
                                  LHS, RHS, Inst->getName() + Name, IP);
326
0
    return SE.getSCEV(Inst);
327
0
  }
328
329
  /// The following functions will just traverse the SCEV and rebuild it with
330
  /// the new operands returned by the traversal.
331
  ///
332
  ///{
333
246
  const SCEV *visitConstant(const SCEVConstant *E) { return E; }
334
14
  const SCEV *visitTruncateExpr(const SCEVTruncateExpr *E) {
335
14
    return SE.getTruncateExpr(visit(E->getOperand()), E->getType());
336
14
  }
337
10
  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *E) {
338
10
    return SE.getZeroExtendExpr(visit(E->getOperand()), E->getType());
339
10
  }
340
8
  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *E) {
341
8
    return SE.getSignExtendExpr(visit(E->getOperand()), E->getType());
342
8
  }
343
2
  const SCEV *visitUDivExpr(const SCEVUDivExpr *E) {
344
2
    auto *RHSScev = visit(E->getRHS());
345
2
    if (!SE.isKnownNonZero(RHSScev))
346
1
      RHSScev = SE.getUMaxExpr(RHSScev, SE.getConstant(E->getType(), 1));
347
2
    return SE.getUDivExpr(visit(E->getLHS()), RHSScev);
348
2
  }
349
165
  const SCEV *visitAddExpr(const SCEVAddExpr *E) {
350
165
    SmallVector<const SCEV *, 4> NewOps;
351
165
    for (const SCEV *Op : E->operands())
352
377
      NewOps.push_back(visit(Op));
353
165
    return SE.getAddExpr(NewOps);
354
165
  }
355
157
  const SCEV *visitMulExpr(const SCEVMulExpr *E) {
356
157
    SmallVector<const SCEV *, 4> NewOps;
357
157
    for (const SCEV *Op : E->operands())
358
335
      NewOps.push_back(visit(Op));
359
157
    return SE.getMulExpr(NewOps);
360
157
  }
361
1
  const SCEV *visitUMaxExpr(const SCEVUMaxExpr *E) {
362
1
    SmallVector<const SCEV *, 4> NewOps;
363
1
    for (const SCEV *Op : E->operands())
364
2
      NewOps.push_back(visit(Op));
365
1
    return SE.getUMaxExpr(NewOps);
366
1
  }
367
0
  const SCEV *visitSMaxExpr(const SCEVSMaxExpr *E) {
368
0
    SmallVector<const SCEV *, 4> NewOps;
369
0
    for (const SCEV *Op : E->operands())
370
0
      NewOps.push_back(visit(Op));
371
0
    return SE.getSMaxExpr(NewOps);
372
0
  }
373
11
  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *E) {
374
11
    SmallVector<const SCEV *, 4> NewOps;
375
11
    for (const SCEV *Op : E->operands())
376
22
      NewOps.push_back(visit(Op));
377
11
    return SE.getAddRecExpr(NewOps, E->getLoop(), E->getNoWrapFlags());
378
11
  }
379
  ///}
380
};
381
382
Value *polly::expandCodeFor(Scop &S, ScalarEvolution &SE, const DataLayout &DL,
383
                            const char *Name, const SCEV *E, Type *Ty,
384
                            Instruction *IP, ValueMapT *VMap,
385
317
                            BasicBlock *RTCBB) {
386
317
  ScopExpander Expander(S.getRegion(), SE, DL, Name, VMap, RTCBB);
387
317
  return Expander.expandCodeFor(E, Ty, IP);
388
317
}
389
390
bool polly::isErrorBlock(BasicBlock &BB, const Region &R, LoopInfo &LI,
391
41.8k
                         const DominatorTree &DT) {
392
41.8k
  if (!PollyAllowErrorBlocks)
393
0
    return false;
394
41.8k
395
41.8k
  if (isa<UnreachableInst>(BB.getTerminator()))
396
0
    return true;
397
41.8k
398
41.8k
  if (LI.isLoopHeader(&BB))
399
14.4k
    return false;
400
27.4k
401
27.4k
  // Basic blocks that are always executed are not considered error blocks,
402
27.4k
  // as their execution can not be a rare event.
403
27.4k
  bool DominatesAllPredecessors = true;
404
27.4k
  if (R.isTopLevelRegion()) {
405
0
    for (BasicBlock &I : *R.getEntry()->getParent())
406
0
      if (isa<ReturnInst>(I.getTerminator()) && !DT.dominates(&BB, &I))
407
0
        DominatesAllPredecessors = false;
408
27.4k
  } else {
409
27.4k
    for (auto Pred : predecessors(R.getExit()))
410
42.3k
      if (R.contains(Pred) && 
!DT.dominates(&BB, Pred)39.9k
)
411
15.4k
        DominatesAllPredecessors = false;
412
27.4k
  }
413
27.4k
414
27.4k
  if (DominatesAllPredecessors)
415
13.0k
    return false;
416
14.3k
417
14.3k
  for (Instruction &Inst : BB)
418
118k
    if (CallInst *CI = dyn_cast<CallInst>(&Inst)) {
419
210
      if (isDebugCall(CI))
420
0
        continue;
421
210
422
210
      if (isIgnoredIntrinsic(CI))
423
54
        continue;
424
156
425
156
      // memset, memcpy and memmove are modeled intrinsics.
426
156
      if (isa<MemSetInst>(CI) || 
isa<MemTransferInst>(CI)138
)
427
26
        continue;
428
130
429
130
      if (!CI->doesNotAccessMemory())
430
108
        return true;
431
22
      if (CI->doesNotReturn())
432
0
        return true;
433
22
    }
434
14.3k
435
14.3k
  
return false14.2k
;
436
14.3k
}
437
438
11.4k
Value *polly::getConditionFromTerminator(Instruction *TI) {
439
11.4k
  if (BranchInst *BR = dyn_cast<BranchInst>(TI)) {
440
11.4k
    if (BR->isUnconditional())
441
5.22k
      return ConstantInt::getTrue(Type::getInt1Ty(TI->getContext()));
442
6.19k
443
6.19k
    return BR->getCondition();
444
6.19k
  }
445
15
446
15
  if (SwitchInst *SI = dyn_cast<SwitchInst>(TI))
447
15
    return SI->getCondition();
448
0
449
0
  return nullptr;
450
0
}
451
452
static bool hasVariantIndex(GetElementPtrInst *Gep, Loop *L, Region &R,
453
356
                            ScalarEvolution &SE) {
454
479
  for (const Use &Val : llvm::drop_begin(Gep->operands(), 1)) {
455
479
    const SCEV *PtrSCEV = SE.getSCEVAtScope(Val, L);
456
479
    Loop *OuterLoop = R.outermostLoopInRegion(L);
457
479
    if (!SE.isLoopInvariant(PtrSCEV, OuterLoop))
458
14
      return true;
459
479
  }
460
356
  
return false342
;
461
356
}
462
463
bool polly::isHoistableLoad(LoadInst *LInst, Region &R, LoopInfo &LI,
464
                            ScalarEvolution &SE, const DominatorTree &DT,
465
941
                            const InvariantLoadsSetTy &KnownInvariantLoads) {
466
941
  Loop *L = LI.getLoopFor(LInst->getParent());
467
941
  auto *Ptr = LInst->getPointerOperand();
468
941
469
941
  // A LoadInst is hoistable if the address it is loading from is also
470
941
  // invariant; in this case: another invariant load (whether that address
471
941
  // is also not written to has to be checked separately)
472
941
  // TODO: This only checks for a LoadInst->GetElementPtrInst->LoadInst
473
941
  // pattern generated by the Chapel frontend, but generally this applies
474
941
  // for any chain of instruction that does not also depend on any
475
941
  // induction variable
476
941
  if (auto *GepInst = dyn_cast<GetElementPtrInst>(Ptr)) {
477
356
    if (!hasVariantIndex(GepInst, L, R, SE)) {
478
342
      if (auto *DecidingLoad =
479
12
              dyn_cast<LoadInst>(GepInst->getPointerOperand())) {
480
12
        if (KnownInvariantLoads.count(DecidingLoad))
481
12
          return true;
482
929
      }
483
342
    }
484
356
  }
485
929
486
929
  const SCEV *PtrSCEV = SE.getSCEVAtScope(Ptr, L);
487
1.07k
  while (L && 
R.contains(L)297
) {
488
151
    if (!SE.isLoopInvariant(PtrSCEV, L))
489
8
      return false;
490
143
    L = L->getParentLoop();
491
143
  }
492
929
493
5.36k
  
for (auto *User : Ptr->users())921
{
494
5.36k
    auto *UserI = dyn_cast<Instruction>(User);
495
5.36k
    if (!UserI || !R.contains(UserI))
496
610
      continue;
497
4.75k
    if (!UserI->mayWriteToMemory())
498
4.37k
      continue;
499
388
500
388
    auto &BB = *UserI->getParent();
501
388
    if (DT.dominates(&BB, LInst->getParent()))
502
120
      return false;
503
268
504
268
    bool DominatesAllPredecessors = true;
505
268
    if (R.isTopLevelRegion()) {
506
1
      for (BasicBlock &I : *R.getEntry()->getParent())
507
5
        if (isa<ReturnInst>(I.getTerminator()) && 
!DT.dominates(&BB, &I)1
)
508
1
          DominatesAllPredecessors = false;
509
267
    } else {
510
267
      for (auto Pred : predecessors(R.getExit()))
511
526
        if (R.contains(Pred) && !DT.dominates(&BB, Pred))
512
492
          DominatesAllPredecessors = false;
513
267
    }
514
268
515
268
    if (!DominatesAllPredecessors)
516
268
      continue;
517
0
518
0
    return false;
519
0
  }
520
921
521
921
  
return true801
;
522
921
}
523
524
5.88k
bool polly::isIgnoredIntrinsic(const Value *V) {
525
5.88k
  if (auto *IT = dyn_cast<IntrinsicInst>(V)) {
526
124
    switch (IT->getIntrinsicID()) {
527
124
    // Lifetime markers are supported/ignored.
528
124
    case llvm::Intrinsic::lifetime_start:
529
69
    case llvm::Intrinsic::lifetime_end:
530
69
    // Invariant markers are supported/ignored.
531
69
    case llvm::Intrinsic::invariant_start:
532
69
    case llvm::Intrinsic::invariant_end:
533
69
    // Some misc annotations are supported/ignored.
534
69
    case llvm::Intrinsic::var_annotation:
535
69
    case llvm::Intrinsic::ptr_annotation:
536
69
    case llvm::Intrinsic::annotation:
537
69
    case llvm::Intrinsic::donothing:
538
69
    case llvm::Intrinsic::assume:
539
69
    // Some debug info intrinsics are supported/ignored.
540
69
    case llvm::Intrinsic::dbg_value:
541
69
    case llvm::Intrinsic::dbg_declare:
542
69
      return true;
543
69
    default:
544
55
      break;
545
5.81k
    }
546
5.81k
  }
547
5.81k
  return false;
548
5.81k
}
549
550
bool polly::canSynthesize(const Value *V, const Scop &S, ScalarEvolution *SE,
551
9.75k
                          Loop *Scope) {
552
9.75k
  if (!V || !SE->isSCEVable(V->getType()))
553
1.43k
    return false;
554
8.32k
555
8.32k
  const InvariantLoadsSetTy &ILS = S.getRequiredInvariantLoads();
556
8.32k
  if (const SCEV *Scev = SE->getSCEVAtScope(const_cast<Value *>(V), Scope))
557
8.32k
    if (!isa<SCEVCouldNotCompute>(Scev))
558
8.32k
      if (!hasScalarDepsInsideRegion(Scev, &S.getRegion(), Scope, false, ILS))
559
5.45k
        return true;
560
2.86k
561
2.86k
  return false;
562
2.86k
}
563
564
6.52k
llvm::BasicBlock *polly::getUseBlock(const llvm::Use &U) {
565
6.52k
  Instruction *UI = dyn_cast<Instruction>(U.getUser());
566
6.52k
  if (!UI)
567
0
    return nullptr;
568
6.52k
569
6.52k
  if (PHINode *PHI = dyn_cast<PHINode>(UI))
570
705
    return PHI->getIncomingBlock(U);
571
5.81k
572
5.81k
  return UI->getParent();
573
5.81k
}
574
575
std::tuple<std::vector<const SCEV *>, std::vector<int>>
576
975
polly::getIndexExpressionsFromGEP(GetElementPtrInst *GEP, ScalarEvolution &SE) {
577
975
  std::vector<const SCEV *> Subscripts;
578
975
  std::vector<int> Sizes;
579
975
580
975
  Type *Ty = GEP->getPointerOperandType();
581
975
582
975
  bool DroppedFirstDim = false;
583
975
584
2.21k
  for (unsigned i = 1; i < GEP->getNumOperands(); 
i++1.23k
) {
585
1.27k
586
1.27k
    const SCEV *Expr = SE.getSCEV(GEP->getOperand(i));
587
1.27k
588
1.27k
    if (i == 1) {
589
975
      if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
590
975
        Ty = PtrTy->getElementType();
591
975
      } else 
if (auto *0
ArrayTy0
= dyn_cast<ArrayType>(Ty)) {
592
0
        Ty = ArrayTy->getElementType();
593
0
      } else {
594
0
        Subscripts.clear();
595
0
        Sizes.clear();
596
0
        break;
597
0
      }
598
975
      if (auto *Const = dyn_cast<SCEVConstant>(Expr))
599
352
        if (Const->getValue()->isZero()) {
600
221
          DroppedFirstDim = true;
601
221
          continue;
602
221
        }
603
754
      Subscripts.push_back(Expr);
604
754
      continue;
605
754
    }
606
295
607
295
    auto *ArrayTy = dyn_cast<ArrayType>(Ty);
608
295
    if (!ArrayTy) {
609
34
      Subscripts.clear();
610
34
      Sizes.clear();
611
34
      break;
612
34
    }
613
261
614
261
    Subscripts.push_back(Expr);
615
261
    if (!(DroppedFirstDim && 
i == 2201
))
616
92
      Sizes.push_back(ArrayTy->getNumElements());
617
261
618
261
    Ty = ArrayTy->getElementType();
619
261
  }
620
975
621
975
  return std::make_tuple(Subscripts, Sizes);
622
975
}
623
624
llvm::Loop *polly::getFirstNonBoxedLoopFor(llvm::Loop *L, llvm::LoopInfo &LI,
625
4.61k
                                           const BoxedLoopsSetTy &BoxedLoops) {
626
4.62k
  while (BoxedLoops.count(L))
627
16
    L = L->getParentLoop();
628
4.61k
  return L;
629
4.61k
}
630
631
llvm::Loop *polly::getFirstNonBoxedLoopFor(llvm::BasicBlock *BB,
632
                                           llvm::LoopInfo &LI,
633
4.61k
                                           const BoxedLoopsSetTy &BoxedLoops) {
634
4.61k
  Loop *L = LI.getLoopFor(BB);
635
4.61k
  return getFirstNonBoxedLoopFor(L, LI, BoxedLoops);
636
4.61k
}
637
638
239
bool polly::isDebugCall(Instruction *Inst) {
639
239
  auto *CI = dyn_cast<CallInst>(Inst);
640
239
  if (!CI)
641
0
    return false;
642
239
643
239
  Function *CF = CI->getCalledFunction();
644
239
  if (!CF)
645
0
    return false;
646
239
647
239
  return std::find(DebugFunctions.begin(), DebugFunctions.end(),
648
239
                   CF->getName()) != DebugFunctions.end();
649
239
}
650
651
0
static bool hasDebugCall(BasicBlock *BB) {
652
0
  for (Instruction &Inst : *BB) {
653
0
    if (isDebugCall(&Inst))
654
0
      return true;
655
0
  }
656
0
  return false;
657
0
}
658
659
3.61k
bool polly::hasDebugCall(ScopStmt *Stmt) {
660
3.61k
  // Quick skip if no debug functions have been defined.
661
3.61k
  if (DebugFunctions.empty())
662
3.61k
    return false;
663
0
664
0
  if (!Stmt)
665
0
    return false;
666
0
667
0
  for (Instruction *Inst : Stmt->getInstructions())
668
0
    if (isDebugCall(Inst))
669
0
      return true;
670
0
671
0
  if (Stmt->isRegionStmt()) {
672
0
    for (BasicBlock *RBB : Stmt->getRegion()->blocks())
673
0
      if (RBB != Stmt->getEntryBlock() && ::hasDebugCall(RBB))
674
0
        return true;
675
0
  }
676
0
677
0
  return false;
678
0
}