Coverage Report

Created: 2017-06-23 12:40

/Users/buildslave/jenkins/sharedspace/clang-stage2-coverage-R@2/llvm/tools/polly/lib/Support/ScopHelper.cpp
//===- ScopHelper.cpp - Some Helper Functions for Scop.  ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Small functions that help with Scop and LLVM-IR.
//
//===----------------------------------------------------------------------===//

#include "polly/Support/ScopHelper.h"
#include "polly/Options.h"
#include "polly/ScopInfo.h"
#include "polly/Support/SCEVValidator.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

using namespace llvm;
using namespace polly;

#define DEBUG_TYPE "polly-scop-helper"

static cl::opt<bool> PollyAllowErrorBlocks(
    "polly-allow-error-blocks",
    cl::desc("Allow to speculate on the execution of 'error blocks'."),
    cl::Hidden, cl::init(true), cl::ZeroOrMore, cl::cat(PollyCategory));

// Ensures that there is just one predecessor to the entry node from outside the
// region.
// The identity of the region entry node is preserved.
static void simplifyRegionEntry(Region *R, DominatorTree *DT, LoopInfo *LI,
                                RegionInfo *RI) {
  BasicBlock *EnteringBB = R->getEnteringBlock();
  BasicBlock *Entry = R->getEntry();

  // Before (one of):
  //
  //                       \    /            //
  //                      EnteringBB         //
  //                        |    \------>    //
  //   \   /                |                //
  //   Entry <--\         Entry <--\         //
  //   /   \    /         /   \    /         //
  //        ....               ....          //

  // Create single entry edge if the region has multiple entry edges.
  if (!EnteringBB) {
    SmallVector<BasicBlock *, 4> Preds;
    for (BasicBlock *P : predecessors(Entry))
      if (!R->contains(P))
        Preds.push_back(P);

    BasicBlock *NewEntering =
        SplitBlockPredecessors(Entry, Preds, ".region_entering", DT, LI);

    if (RI) {
      // The exit block of predecessing regions must be changed to NewEntering
      for (BasicBlock *ExitPred : predecessors(NewEntering)) {
        Region *RegionOfPred = RI->getRegionFor(ExitPred);
        if (RegionOfPred->getExit() != Entry)
          continue;

        while (!RegionOfPred->isTopLevelRegion() &&
               RegionOfPred->getExit() == Entry) {
          RegionOfPred->replaceExit(NewEntering);
          RegionOfPred = RegionOfPred->getParent();
        }
      }

      // Make all ancestors use EnteringBB as entry; there might be edges to it
      Region *AncestorR = R->getParent();
      RI->setRegionFor(NewEntering, AncestorR);
      while (!AncestorR->isTopLevelRegion() &&
             AncestorR->getEntry() == Entry) {
        AncestorR->replaceEntry(NewEntering);
        AncestorR = AncestorR->getParent();
      }
    }

    EnteringBB = NewEntering;
  }
  assert(R->getEnteringBlock() == EnteringBB);

  // After:
  //
  //    \    /       //
  //  EnteringBB     //
  //      |          //
  //      |          //
  //    Entry <--\   //
  //    /   \    /   //
  //         ....    //
}

// Ensure that the region has a single block that branches to the exit node.
static void simplifyRegionExit(Region *R, DominatorTree *DT, LoopInfo *LI,
                               RegionInfo *RI) {
  BasicBlock *ExitBB = R->getExit();
  BasicBlock *ExitingBB = R->getExitingBlock();

  // Before:
  //
  //   (Region)   ______/  //
  //      \  |   /         //
  //       ExitBB          //
  //       /    \          //

  if (!ExitingBB) {
    SmallVector<BasicBlock *, 4> Preds;
    for (BasicBlock *P : predecessors(ExitBB))
      if (R->contains(P))
        Preds.push_back(P);

    //  Preds[0] Preds[1]      otherBB //
    //         \  |  ________/         //
    //          \ | /                  //
    //           BB                    //
    ExitingBB =
        SplitBlockPredecessors(ExitBB, Preds, ".region_exiting", DT, LI);
    // Preds[0] Preds[1]      otherBB  //
    //        \  /           /         //
    // BB.region_exiting    /          //
    //                  \  /           //
    //                   BB            //

    if (RI)
      RI->setRegionFor(ExitingBB, R);

    // Change the exit of nested regions, but not the region itself,
    R->replaceExitRecursive(ExitingBB);
    R->replaceExit(ExitBB);
  }
  assert(ExitingBB == R->getExitingBlock());

  // After:
  //
  //     \   /                //
  //    ExitingBB     _____/  //
  //          \      /        //
  //           ExitBB         //
  //           /    \         //
}

void polly::simplifyRegion(Region *R, DominatorTree *DT, LoopInfo *LI,
                           RegionInfo *RI) {
  assert(R && !R->isTopLevelRegion());
  assert(!RI || RI == R->getRegionInfo());
  assert((!RI || DT) &&
         "RegionInfo requires DominatorTree to be updated as well");

  simplifyRegionEntry(R, DT, LI, RI);
  simplifyRegionExit(R, DT, LI, RI);
  assert(R->isSimple());
}
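
As a usage illustration (not part of ScopHelper.cpp), a caller that already owns up-to-date DominatorTree, LoopInfo and RegionInfo analyses might canonicalize a region as sketched below; the helper name is hypothetical, only polly::simplifyRegion and its asserted contract come from the listing above.

// Illustrative sketch only -- not part of ScopHelper.cpp. Assumes the same
// headers and using-directives as the file above.
static void canonicalizeRegion(Region *R, DominatorTree &DT, LoopInfo &LI,
                               RegionInfo &RI) {
  if (R->isTopLevelRegion())
    return; // simplifyRegion requires a non-top-level region
  polly::simplifyRegion(R, &DT, &LI, &RI); // updates DT, LI and RI in place
  assert(R->isSimple()); // exactly one entering and one exiting edge now
}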

// Split the block into two successive blocks.
//
// Like llvm::SplitBlock, but also preserves RegionInfo
static BasicBlock *splitBlock(BasicBlock *Old, Instruction *SplitPt,
                              DominatorTree *DT, llvm::LoopInfo *LI,
                              RegionInfo *RI) {
  assert(Old && SplitPt);

  // Before:
  //
  //  \   /  //
  //   Old   //
  //  /   \  //

  BasicBlock *NewBlock = llvm::SplitBlock(Old, SplitPt, DT, LI);

  if (RI) {
    Region *R = RI->getRegionFor(Old);
    RI->setRegionFor(NewBlock, R);
  }

  // After:
  //
  //   \   /    //
  //    Old     //
  //     |      //
  //  NewBlock  //
  //   /   \    //

  return NewBlock;
}

void polly::splitEntryBlockForAlloca(BasicBlock *EntryBlock, Pass *P) {
  // Find first non-alloca instruction. Every basic block has a non-alloca
  // instruction, as every well formed basic block has a terminator.
  BasicBlock::iterator I = EntryBlock->begin();
  while (isa<AllocaInst>(I))
    ++I;

  auto *DTWP = P->getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
  auto *LIWP = P->getAnalysisIfAvailable<LoopInfoWrapperPass>();
  auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
  RegionInfoPass *RIP = P->getAnalysisIfAvailable<RegionInfoPass>();
  RegionInfo *RI = RIP ? &RIP->getRegionInfo() : nullptr;

  // splitBlock updates DT, LI and RI.
  splitBlock(EntryBlock, &*I, DT, LI, RI);
}
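
A minimal sketch (not part of ScopHelper.cpp) of how splitEntryBlockForAlloca could be driven; the free-function wrapper is an assumption, only the helper's signature is taken from the listing above.

// Illustrative sketch only -- not part of ScopHelper.cpp. 'P' stands for a
// legacy pass that may or may not have DominatorTree/LoopInfo/RegionInfo
// available; splitEntryBlockForAlloca updates whichever of them it can find.
static void reserveEntryBlockForAllocas(Function &F, Pass &P) {
  // Everything after the leading allocas is moved into a new successor block,
  // so later code generation can keep inserting allocas in the entry block.
  polly::splitEntryBlockForAlloca(&F.getEntryBlock(), &P);
}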

/// The SCEVExpander will __not__ generate any code for an existing SDiv/SRem
/// instruction but just use it, if it is referenced as a SCEVUnknown. We want
/// however to generate new code if the instruction is in the analyzed region
/// and we generate code outside/in front of that region. Hence, we generate the
/// code for the SDiv/SRem operands in front of the analyzed region and then
/// create a new SDiv/SRem operation there too.
struct ScopExpander : SCEVVisitor<ScopExpander, const SCEV *> {
  friend struct SCEVVisitor<ScopExpander, const SCEV *>;

  explicit ScopExpander(const Region &R, ScalarEvolution &SE,
                        const DataLayout &DL, const char *Name, ValueMapT *VMap,
                        BasicBlock *RTCBB)
      : Expander(SCEVExpander(SE, DL, Name)), SE(SE), Name(Name), R(R),
        VMap(VMap), RTCBB(RTCBB) {}

  Value *expandCodeFor(const SCEV *E, Type *Ty, Instruction *I) {
    // If we generate code in the region we will immediately fall back to the
    // SCEVExpander, otherwise we will stop at all unknowns in the SCEV and if
    // needed replace them by copies computed in the entering block.
    if (!R.contains(I))
      E = visit(E);
    return Expander.expandCodeFor(E, Ty, I);
  }

private:
  SCEVExpander Expander;
  ScalarEvolution &SE;
  const char *Name;
  const Region &R;
  ValueMapT *VMap;
  BasicBlock *RTCBB;

  const SCEV *visitGenericInst(const SCEVUnknown *E, Instruction *Inst,
                               Instruction *IP) {
    if (!Inst || !R.contains(Inst))
      return E;

    assert(!Inst->mayThrow() && !Inst->mayReadOrWriteMemory() &&
           !isa<PHINode>(Inst));

    auto *InstClone = Inst->clone();
    for (auto &Op : Inst->operands()) {
      assert(SE.isSCEVable(Op->getType()));
      auto *OpSCEV = SE.getSCEV(Op);
      auto *OpClone = expandCodeFor(OpSCEV, Op->getType(), IP);
      InstClone->replaceUsesOfWith(Op, OpClone);
    }

    InstClone->setName(Name + Inst->getName());
    InstClone->insertBefore(IP);
    return SE.getSCEV(InstClone);
  }

  const SCEV *visitUnknown(const SCEVUnknown *E) {

    // If a value mapping was given try if the underlying value is remapped.
    Value *NewVal = VMap ? VMap->lookup(E->getValue()) : nullptr;
    if (NewVal) {
      auto *NewE = SE.getSCEV(NewVal);

      // While the mapped value might be different the SCEV representation might
      // not be. To this end we will check before we go into recursion here.
      if (E != NewE)
        return visit(NewE);
    }

    Instruction *Inst = dyn_cast<Instruction>(E->getValue());
    Instruction *IP;
    if (Inst && !R.contains(Inst))
      IP = Inst;
    else if (Inst && RTCBB->getParent() == Inst->getFunction())
      IP = RTCBB->getTerminator();
    else
      IP = RTCBB->getParent()->getEntryBlock().getTerminator();

    if (!Inst || (Inst->getOpcode() != Instruction::SRem &&
                  Inst->getOpcode() != Instruction::SDiv))
      return visitGenericInst(E, Inst, IP);

    const SCEV *LHSScev = SE.getSCEV(Inst->getOperand(0));
    const SCEV *RHSScev = SE.getSCEV(Inst->getOperand(1));

    if (!SE.isKnownNonZero(RHSScev))
      RHSScev = SE.getUMaxExpr(RHSScev, SE.getConstant(E->getType(), 1));

    Value *LHS = expandCodeFor(LHSScev, E->getType(), IP);
    Value *RHS = expandCodeFor(RHSScev, E->getType(), IP);

    Inst = BinaryOperator::Create((Instruction::BinaryOps)Inst->getOpcode(),
                                  LHS, RHS, Inst->getName() + Name, IP);
    return SE.getSCEV(Inst);
  }

  /// The following functions will just traverse the SCEV and rebuild it with
  /// the new operands returned by the traversal.
  ///
  ///{
  const SCEV *visitConstant(const SCEVConstant *E) { return E; }
  const SCEV *visitTruncateExpr(const SCEVTruncateExpr *E) {
    return SE.getTruncateExpr(visit(E->getOperand()), E->getType());
  }
  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *E) {
    return SE.getZeroExtendExpr(visit(E->getOperand()), E->getType());
  }
  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *E) {
    return SE.getSignExtendExpr(visit(E->getOperand()), E->getType());
  }
  const SCEV *visitUDivExpr(const SCEVUDivExpr *E) {
    auto *RHSScev = visit(E->getRHS());
    if (!SE.isKnownNonZero(RHSScev))
      RHSScev = SE.getUMaxExpr(RHSScev, SE.getConstant(E->getType(), 1));
    return SE.getUDivExpr(visit(E->getLHS()), RHSScev);
  }
  const SCEV *visitAddExpr(const SCEVAddExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getAddExpr(NewOps);
  }
  const SCEV *visitMulExpr(const SCEVMulExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getMulExpr(NewOps);
  }
  const SCEV *visitUMaxExpr(const SCEVUMaxExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getUMaxExpr(NewOps);
  }
  const SCEV *visitSMaxExpr(const SCEVSMaxExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getSMaxExpr(NewOps);
  }
  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getAddRecExpr(NewOps, E->getLoop(), E->getNoWrapFlags());
  }
  ///}
};

Value *polly::expandCodeFor(Scop &S, ScalarEvolution &SE, const DataLayout &DL,
                            const char *Name, const SCEV *E, Type *Ty,
                            Instruction *IP, ValueMapT *VMap,
                            BasicBlock *RTCBB) {
  ScopExpander Expander(S.getRegion(), SE, DL, Name, VMap, RTCBB);
  return Expander.expandCodeFor(E, Ty, IP);
}
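
A hedged sketch (not part of ScopHelper.cpp) of how the polly::expandCodeFor wrapper above could be called; everything except the wrapper itself (the helper name, how the DataLayout is obtained, the name prefix, and the choice of a null value map) is an assumption made for illustration.

// Illustrative sketch only -- not part of ScopHelper.cpp.
static Value *expandBoundBeforeScop(Scop &S, ScalarEvolution &SE,
                                    const SCEV *Bound,
                                    Instruction *InsertBefore,
                                    BasicBlock *RTCBB) {
  const DataLayout &DL = InsertBefore->getModule()->getDataLayout();
  // No value remapping here, hence the nullptr VMap. With InsertBefore
  // outside the SCoP, SDiv/SRem instructions from inside the region are
  // re-created at the insert point instead of being referenced directly.
  return polly::expandCodeFor(S, SE, DL, "polly", Bound, Bound->getType(),
                              InsertBefore, nullptr, RTCBB);
}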

bool polly::isErrorBlock(BasicBlock &BB, const Region &R, LoopInfo &LI,
                         const DominatorTree &DT) {
  if (!PollyAllowErrorBlocks)
    return false;

  if (isa<UnreachableInst>(BB.getTerminator()))
    return true;

  if (LI.isLoopHeader(&BB))
    return false;

  // Basic blocks that are always executed are not considered error blocks,
  // as their execution can not be a rare event.
  bool DominatesAllPredecessors = true;
  if (R.isTopLevelRegion()) {
    for (BasicBlock &I : *R.getEntry()->getParent())
      if (isa<ReturnInst>(I.getTerminator()) && !DT.dominates(&BB, &I))
        DominatesAllPredecessors = false;
  } else {
    for (auto Pred : predecessors(R.getExit()))
      if (R.contains(Pred) && !DT.dominates(&BB, Pred))
        DominatesAllPredecessors = false;
  }

  if (DominatesAllPredecessors)
    return false;

  // FIXME: This is a simple heuristic to determine if the load is executed
  //        in a conditional. However, we actually would need the control
  //        condition, i.e., the post dominance frontier. Alternatively we
  //        could walk up the dominance tree until we find a block that is
  //        not post dominated by the load and check if it is a conditional
  //        or a loop header.
  auto *DTNode = DT.getNode(&BB);
  auto *IDomBB = DTNode->getIDom()->getBlock();
  if (LI.isLoopHeader(IDomBB))
    return false;

  for (Instruction &Inst : BB)
    if (CallInst *CI = dyn_cast<CallInst>(&Inst)) {
      if (isIgnoredIntrinsic(CI))
        return false;

      if (!CI->doesNotAccessMemory())
        return true;
      if (CI->doesNotReturn())
        return true;
    }

  return false;
}
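
A small sketch (not part of ScopHelper.cpp) of one way a caller could use isErrorBlock; the helper name is hypothetical, the signature of polly::isErrorBlock comes from the listing above.

// Illustrative sketch only -- not part of ScopHelper.cpp.
static unsigned countErrorBlocks(const Region &R, LoopInfo &LI,
                                 const DominatorTree &DT) {
  unsigned NumErrorBlocks = 0;
  for (BasicBlock *BB : R.blocks()) // all basic blocks of the region
    if (polly::isErrorBlock(*BB, R, LI, DT))
      ++NumErrorBlocks; // rarely executed blocks Polly may speculate on
  return NumErrorBlocks;
}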

Value *polly::getConditionFromTerminator(TerminatorInst *TI) {
  if (BranchInst *BR = dyn_cast<BranchInst>(TI)) {
    if (BR->isUnconditional())
      return ConstantInt::getTrue(Type::getInt1Ty(TI->getContext()));

    return BR->getCondition();
  }

  if (SwitchInst *SI = dyn_cast<SwitchInst>(TI))
    return SI->getCondition();

  return nullptr;
}
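
As a usage sketch (not part of ShopHelper.cpp's original text), the helper below checks whether a block exits through a trivially true condition; the helper name is an assumption, the behavior of getConditionFromTerminator (constant i1 true for unconditional branches, nullptr for other terminators) is taken from the listing above.

// Illustrative sketch only -- not part of ScopHelper.cpp.
static bool hasTrivialCondition(BasicBlock *BB) {
  Value *Cond = polly::getConditionFromTerminator(BB->getTerminator());
  if (!Cond)
    return false; // not a branch or switch
  auto *CI = dyn_cast<ConstantInt>(Cond);
  return CI && CI->isOne(); // unconditional branches end up here
}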

bool polly::isHoistableLoad(LoadInst *LInst, Region &R, LoopInfo &LI,
                            ScalarEvolution &SE, const DominatorTree &DT) {
  Loop *L = LI.getLoopFor(LInst->getParent());
  auto *Ptr = LInst->getPointerOperand();
  const SCEV *PtrSCEV = SE.getSCEVAtScope(Ptr, L);
  while (L && R.contains(L)) {
    if (!SE.isLoopInvariant(PtrSCEV, L))
      return false;
    L = L->getParentLoop();
  }

  for (auto *User : Ptr->users()) {
    auto *UserI = dyn_cast<Instruction>(User);
    if (!UserI || !R.contains(UserI))
      continue;
    if (!UserI->mayWriteToMemory())
      continue;

    auto &BB = *UserI->getParent();
    if (DT.dominates(&BB, LInst->getParent()))
      return false;

    bool DominatesAllPredecessors = true;
    for (auto Pred : predecessors(R.getExit()))
      if (R.contains(Pred) && !DT.dominates(&BB, Pred))
        DominatesAllPredecessors = false;

    if (!DominatesAllPredecessors)
      continue;

    return false;
  }

  return true;
}

bool polly::isIgnoredIntrinsic(const Value *V) {
  if (auto *IT = dyn_cast<IntrinsicInst>(V)) {
    switch (IT->getIntrinsicID()) {
    // Lifetime markers are supported/ignored.
    case llvm::Intrinsic::lifetime_start:
    case llvm::Intrinsic::lifetime_end:
    // Invariant markers are supported/ignored.
    case llvm::Intrinsic::invariant_start:
    case llvm::Intrinsic::invariant_end:
    // Some misc annotations are supported/ignored.
    case llvm::Intrinsic::var_annotation:
    case llvm::Intrinsic::ptr_annotation:
    case llvm::Intrinsic::annotation:
    case llvm::Intrinsic::donothing:
    case llvm::Intrinsic::assume:
    // Some debug info intrinsics are supported/ignored.
    case llvm::Intrinsic::dbg_value:
    case llvm::Intrinsic::dbg_declare:
      return true;
    default:
      break;
    }
  }
  return false;
}
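
A minimal usage sketch (not part of ScopHelper.cpp) for isIgnoredIntrinsic; the helper name is an assumption, the set of ignored intrinsics is exactly the one listed above.

// Illustrative sketch only -- not part of ScopHelper.cpp. Counts the calls in
// a block that Polly cannot simply ignore.
static unsigned countRelevantCalls(BasicBlock &BB) {
  unsigned NumCalls = 0;
  for (Instruction &Inst : BB)
    if (isa<CallInst>(&Inst) && !polly::isIgnoredIntrinsic(&Inst))
      ++NumCalls; // lifetime/invariant/annotation/debug intrinsics are skipped
  return NumCalls;
}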

bool polly::canSynthesize(const Value *V, const Scop &S, ScalarEvolution *SE,
                          Loop *Scope) {
  if (!V || !SE->isSCEVable(V->getType()))
    return false;

  if (const SCEV *Scev = SE->getSCEVAtScope(const_cast<Value *>(V), Scope))
    if (!isa<SCEVCouldNotCompute>(Scev))
      if (!hasScalarDepsInsideRegion(Scev, &S.getRegion(), Scope, false))
        return true;

  return false;
}

llvm::BasicBlock *polly::getUseBlock(llvm::Use &U) {
  Instruction *UI = dyn_cast<Instruction>(U.getUser());
  if (!UI)
    return nullptr;

  if (PHINode *PHI = dyn_cast<PHINode>(UI))
    return PHI->getIncomingBlock(U);

  return UI->getParent();
}
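
A usage sketch (not part of ScopHelper.cpp) for getUseBlock; the helper name is an assumption. It collects the blocks in which a value is used, resolving PHI uses to the corresponding incoming block as the function above does.

// Illustrative sketch only -- not part of ScopHelper.cpp.
static void collectUseBlocks(Value *V, SmallPtrSetImpl<BasicBlock *> &Blocks) {
  for (Use &U : V->uses())
    if (BasicBlock *UseBB = polly::getUseBlock(U))
      Blocks.insert(UseBB);
}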

std::tuple<std::vector<const SCEV *>, std::vector<int>>
polly::getIndexExpressionsFromGEP(GetElementPtrInst *GEP, ScalarEvolution &SE) {
  std::vector<const SCEV *> Subscripts;
  std::vector<int> Sizes;

  Type *Ty = GEP->getPointerOperandType();

  bool DroppedFirstDim = false;

  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {

    const SCEV *Expr = SE.getSCEV(GEP->getOperand(i));

    if (i == 1) {
      if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
        Ty = PtrTy->getElementType();
      } else if (auto *ArrayTy = dyn_cast<ArrayType>(Ty)) {
        Ty = ArrayTy->getElementType();
      } else {
        Subscripts.clear();
        Sizes.clear();
        break;
      }
      if (auto *Const = dyn_cast<SCEVConstant>(Expr))
        if (Const->getValue()->isZero()) {
          DroppedFirstDim = true;
          continue;
        }
      Subscripts.push_back(Expr);
      continue;
    }

    auto *ArrayTy = dyn_cast<ArrayType>(Ty);
    if (!ArrayTy) {
      Subscripts.clear();
      Sizes.clear();
      break;
    }

    Subscripts.push_back(Expr);
    if (!(DroppedFirstDim && i == 2))
      Sizes.push_back(ArrayTy->getNumElements());

    Ty = ArrayTy->getElementType();
  }

  return std::make_tuple(Subscripts, Sizes);
}
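
A final usage sketch (not part of ScopHelper.cpp) for getIndexExpressionsFromGEP; the helper name is an assumption, the tuple return type comes from the listing above.

// Illustrative sketch only -- not part of ScopHelper.cpp.
static unsigned numRecoveredDimensions(GetElementPtrInst *GEP,
                                       ScalarEvolution &SE) {
  std::vector<const SCEV *> Subscripts;
  std::vector<int> Sizes;
  std::tie(Subscripts, Sizes) = polly::getIndexExpressionsFromGEP(GEP, SE);
  // Both vectors come back empty if the GEP does not have the expected
  // pointer-to-(nested-)array shape; otherwise Sizes holds the inner
  // dimension sizes and Subscripts the per-dimension index expressions.
  return static_cast<unsigned>(Subscripts.size());
}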