Coverage Report

Created: 2017-11-23 03:11

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/tools/polly/lib/Support/ScopHelper.cpp
Line
Count
Source (jump to first uncovered line)
1
//===- ScopHelper.cpp - Some Helper Functions for Scop.  ------------------===//
2
//
3
//                     The LLVM Compiler Infrastructure
4
//
5
// This file is distributed under the University of Illinois Open Source
6
// License. See LICENSE.TXT for details.
7
//
8
//===----------------------------------------------------------------------===//
9
//
10
// Small functions that help with Scop and LLVM-IR.
11
//
12
//===----------------------------------------------------------------------===//
13
14
#include "polly/Support/ScopHelper.h"
15
#include "polly/Options.h"
16
#include "polly/ScopInfo.h"
17
#include "polly/Support/SCEVValidator.h"
18
#include "llvm/Analysis/LoopInfo.h"
19
#include "llvm/Analysis/RegionInfo.h"
20
#include "llvm/Analysis/ScalarEvolution.h"
21
#include "llvm/Analysis/ScalarEvolutionExpander.h"
22
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
23
#include "llvm/IR/CFG.h"
24
#include "llvm/IR/IntrinsicInst.h"
25
#include "llvm/Support/Debug.h"
26
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
27
28
using namespace llvm;
29
using namespace polly;
30
31
#define DEBUG_TYPE "polly-scop-helper"
32
33
// Command-line flag (default: true). When set, Polly may speculate that
// 'error blocks' -- blocks assumed to execute only rarely, e.g. aborts or
// error-reporting paths -- are never executed inside a SCoP.
static cl::opt<bool> PollyAllowErrorBlocks(
    "polly-allow-error-blocks",
    cl::desc("Allow to speculate on the execution of 'error blocks'."),
    cl::Hidden, cl::init(true), cl::ZeroOrMore, cl::cat(PollyCategory));
37
38
// Ensures that there is just one predecessor to the entry node from outside the
39
// region.
40
// The identity of the region entry node is preserved.
41
static void simplifyRegionEntry(Region *R, DominatorTree *DT, LoopInfo *LI,
42
290
                                RegionInfo *RI) {
43
290
  BasicBlock *EnteringBB = R->getEnteringBlock();
44
290
  BasicBlock *Entry = R->getEntry();
45
290
46
290
  // Before (one of):
47
290
  //
48
290
  //                       \    /            //
49
290
  //                      EnteringBB         //
50
290
  //                        |    \------>    //
51
290
  //   \   /                |                //
52
290
  //   Entry <--\         Entry <--\         //
53
290
  //   /   \    /         /   \    /         //
54
290
  //        ....               ....          //
55
290
56
290
  // Create single entry edge if the region has multiple entry edges.
57
290
  if (!EnteringBB) {
58
6
    SmallVector<BasicBlock *, 4> Preds;
59
6
    for (BasicBlock *P : predecessors(Entry))
60
12
      if (!R->contains(P))
61
12
        Preds.push_back(P);
62
6
63
6
    BasicBlock *NewEntering =
64
6
        SplitBlockPredecessors(Entry, Preds, ".region_entering", DT, LI);
65
6
66
6
    if (RI) {
67
6
      // The exit block of predecessing regions must be changed to NewEntering
68
12
      for (BasicBlock *ExitPred : predecessors(NewEntering)) {
69
12
        Region *RegionOfPred = RI->getRegionFor(ExitPred);
70
12
        if (RegionOfPred->getExit() != Entry)
71
11
          continue;
72
1
73
2
        
while (1
!RegionOfPred->isTopLevelRegion() &&
74
2
               
RegionOfPred->getExit() == Entry1
) {
75
1
          RegionOfPred->replaceExit(NewEntering);
76
1
          RegionOfPred = RegionOfPred->getParent();
77
1
        }
78
12
      }
79
6
80
6
      // Make all ancestors use EnteringBB as entry; there might be edges to it
81
6
      Region *AncestorR = R->getParent();
82
6
      RI->setRegionFor(NewEntering, AncestorR);
83
11
      while (!AncestorR->isTopLevelRegion() && 
AncestorR->getEntry() == Entry6
) {
84
5
        AncestorR->replaceEntry(NewEntering);
85
5
        AncestorR = AncestorR->getParent();
86
5
      }
87
6
    }
88
6
89
6
    EnteringBB = NewEntering;
90
6
  }
91
290
  assert(R->getEnteringBlock() == EnteringBB);
92
290
93
290
  // After:
94
290
  //
95
290
  //    \    /       //
96
290
  //  EnteringBB     //
97
290
  //      |          //
98
290
  //      |          //
99
290
  //    Entry <--\   //
100
290
  //    /   \    /   //
101
290
  //         ....    //
102
290
}
103
104
// Ensure that the region has a single block that branches to the exit node.
//
// @param R  The region whose exit is simplified.
// @param DT DominatorTree to keep updated.
// @param LI LoopInfo to keep updated.
// @param RI RegionInfo to keep updated; may be nullptr.
static void simplifyRegionExit(Region *R, DominatorTree *DT, LoopInfo *LI,
                               RegionInfo *RI) {
  BasicBlock *ExitBB = R->getExit();
  BasicBlock *ExitingBB = R->getExitingBlock();

  // Before:
  //
  //   (Region)   ______/  //
  //      \  |   /         //
  //       ExitBB          //
  //       /    \          //

  if (!ExitingBB) {
    // Collect only the exit edges originating inside the region; edges into
    // ExitBB from outside the region keep targeting ExitBB.
    SmallVector<BasicBlock *, 4> Preds;
    for (BasicBlock *P : predecessors(ExitBB))
      if (R->contains(P))
        Preds.push_back(P);

    //  Preds[0] Preds[1]      otherBB //
    //         \  |  ________/         //
    //          \ | /                  //
    //           BB                    //
    ExitingBB =
        SplitBlockPredecessors(ExitBB, Preds, ".region_exiting", DT, LI);
    // Preds[0] Preds[1]      otherBB  //
    //        \  /           /         //
    // BB.region_exiting    /          //
    //                  \  /           //
    //                   BB            //

    if (RI)
      RI->setRegionFor(ExitingBB, R);

    // Change the exit of nested regions, but not the region itself,
    R->replaceExitRecursive(ExitingBB);
    R->replaceExit(ExitBB);
  }
  assert(ExitingBB == R->getExitingBlock());

  // After:
  //
  //     \   /                //
  //    ExitingBB     _____/  //
  //          \      /        //
  //           ExitBB         //
  //           /    \         //
}
152
153
// Simplify the region R so that it has a single entering edge and a single
// exiting block; afterwards the region is "simple" (asserted below).
// DT, LI and RI are kept up to date when provided; updating RI requires DT.
void polly::simplifyRegion(Region *R, DominatorTree *DT, LoopInfo *LI,
                           RegionInfo *RI) {
  assert(R && !R->isTopLevelRegion());
  assert(!RI || RI == R->getRegionInfo());
  assert((!RI || DT) &&
         "RegionInfo requires DominatorTree to be updated as well");

  simplifyRegionEntry(R, DT, LI, RI);
  simplifyRegionExit(R, DT, LI, RI);
  assert(R->isSimple());
}
164
165
// Split the block into two successive blocks.
//
// Like llvm::SplitBlock, but also preserves RegionInfo
//
// @param Old     The block to split; SplitPt and everything after it moves to
//                the new successor block.
// @param SplitPt First instruction of the new block.
// @param DT, LI  Analyses updated by llvm::SplitBlock.
// @param RI      RegionInfo to keep updated; may be nullptr.
// @returns       The newly created successor block.
static BasicBlock *splitBlock(BasicBlock *Old, Instruction *SplitPt,
                              DominatorTree *DT, llvm::LoopInfo *LI,
                              RegionInfo *RI) {
  assert(Old && SplitPt);

  // Before:
  //
  //  \   /  //
  //   Old   //
  //  /   \  //

  BasicBlock *NewBlock = llvm::SplitBlock(Old, SplitPt, DT, LI);

  if (RI) {
    // The new block belongs to the same region as the block it was split from.
    Region *R = RI->getRegionFor(Old);
    RI->setRegionFor(NewBlock, R);
  }

  // After:
  //
  //   \   /    //
  //    Old     //
  //     |      //
  //  NewBlock  //
  //   /   \    //

  return NewBlock;
}
196
197
void polly::splitEntryBlockForAlloca(BasicBlock *EntryBlock, DominatorTree *DT,
198
13
                                     LoopInfo *LI, RegionInfo *RI) {
199
13
  // Find first non-alloca instruction. Every basic block has a non-alloca
200
13
  // instruction, as every well formed basic block has a terminator.
201
13
  BasicBlock::iterator I = EntryBlock->begin();
202
13
  while (isa<AllocaInst>(I))
203
13
    
++I0
;
204
13
205
13
  // splitBlock updates DT, LI and RI.
206
13
  splitBlock(EntryBlock, &*I, DT, LI, RI);
207
13
}
208
209
13
// Pass-based convenience overload: fetch whichever of DominatorTree, LoopInfo
// and RegionInfo are currently available from the pass manager (each may be
// nullptr) and forward to the analysis-based overload.
void polly::splitEntryBlockForAlloca(BasicBlock *EntryBlock, Pass *P) {
  auto *DTWP = P->getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
  auto *LIWP = P->getAnalysisIfAvailable<LoopInfoWrapperPass>();
  auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
  RegionInfoPass *RIP = P->getAnalysisIfAvailable<RegionInfoPass>();
  RegionInfo *RI = RIP ? &RIP->getRegionInfo() : nullptr;

  // splitBlock updates DT, LI and RI.
  polly::splitEntryBlockForAlloca(EntryBlock, DT, LI, RI);
}
220
221
/// The SCEVExpander will __not__ generate any code for an existing SDiv/SRem
222
/// instruction but just use it, if it is referenced as a SCEVUnknown. We want
223
/// however to generate new code if the instruction is in the analyzed region
224
/// and we generate code outside/in front of that region. Hence, we generate the
225
/// code for the SDiv/SRem operands in front of the analyzed region and then
226
/// create a new SDiv/SRem operation there too.
227
struct ScopExpander : SCEVVisitor<ScopExpander, const SCEV *> {
228
  friend struct SCEVVisitor<ScopExpander, const SCEV *>;
229
230
  explicit ScopExpander(const Region &R, ScalarEvolution &SE,
231
                        const DataLayout &DL, const char *Name, ValueMapT *VMap,
232
                        BasicBlock *RTCBB)
233
      : Expander(SCEVExpander(SE, DL, Name)), SE(SE), Name(Name), R(R),
234
1.25k
        VMap(VMap), RTCBB(RTCBB) {}
235
236
1.26k
  Value *expandCodeFor(const SCEV *E, Type *Ty, Instruction *I) {
237
1.26k
    // If we generate code in the region we will immediately fall back to the
238
1.26k
    // SCEVExpander, otherwise we will stop at all unknowns in the SCEV and if
239
1.26k
    // needed replace them by copies computed in the entering block.
240
1.26k
    if (!R.contains(I))
241
1.26k
      E = visit(E);
242
1.26k
    return Expander.expandCodeFor(E, Ty, I);
243
1.26k
  }
244
245
private:
246
  SCEVExpander Expander;
247
  ScalarEvolution &SE;
248
  const char *Name;
249
  const Region &R;
250
  ValueMapT *VMap;
251
  BasicBlock *RTCBB;
252
253
  const SCEV *visitGenericInst(const SCEVUnknown *E, Instruction *Inst,
254
1.59k
                               Instruction *IP) {
255
1.59k
    if (!Inst || 
!R.contains(Inst)694
)
256
1.59k
      return E;
257
2
258
2
    assert(!Inst->mayThrow() && !Inst->mayReadOrWriteMemory() &&
259
2
           !isa<PHINode>(Inst));
260
2
261
2
    auto *InstClone = Inst->clone();
262
2
    for (auto &Op : Inst->operands()) {
263
2
      assert(SE.isSCEVable(Op->getType()));
264
2
      auto *OpSCEV = SE.getSCEV(Op);
265
2
      auto *OpClone = expandCodeFor(OpSCEV, Op->getType(), IP);
266
2
      InstClone->replaceUsesOfWith(Op, OpClone);
267
2
    }
268
1.59k
269
1.59k
    InstClone->setName(Name + Inst->getName());
270
1.59k
    InstClone->insertBefore(IP);
271
1.59k
    return SE.getSCEV(InstClone);
272
1.59k
  }
273
274
1.66k
  const SCEV *visitUnknown(const SCEVUnknown *E) {
275
1.66k
276
1.66k
    // If a value mapping was given try if the underlying value is remapped.
277
1.66k
    Value *NewVal = VMap ? 
VMap->lookup(E->getValue())1.64k
:
nullptr16
;
278
1.66k
    if (NewVal) {
279
70
      auto *NewE = SE.getSCEV(NewVal);
280
70
281
70
      // While the mapped value might be different the SCEV representation might
282
70
      // not be. To this end we will check before we go into recursion here.
283
70
      if (E != NewE)
284
67
        return visit(NewE);
285
1.59k
    }
286
1.59k
287
1.59k
    Instruction *Inst = dyn_cast<Instruction>(E->getValue());
288
1.59k
    Instruction *IP;
289
1.59k
    if (Inst && 
!R.contains(Inst)697
)
290
695
      IP = Inst;
291
901
    else if (Inst && 
RTCBB->getParent() == Inst->getFunction()2
)
292
2
      IP = RTCBB->getTerminator();
293
899
    else
294
899
      IP = RTCBB->getParent()->getEntryBlock().getTerminator();
295
1.59k
296
1.59k
    if (!Inst || 
(697
Inst->getOpcode() != Instruction::SRem697
&&
297
697
                  Inst->getOpcode() != Instruction::SDiv))
298
1.59k
      return visitGenericInst(E, Inst, IP);
299
3
300
3
    const SCEV *LHSScev = SE.getSCEV(Inst->getOperand(0));
301
3
    const SCEV *RHSScev = SE.getSCEV(Inst->getOperand(1));
302
3
303
3
    if (!SE.isKnownNonZero(RHSScev))
304
1
      RHSScev = SE.getUMaxExpr(RHSScev, SE.getConstant(E->getType(), 1));
305
1.66k
306
1.66k
    Value *LHS = expandCodeFor(LHSScev, E->getType(), IP);
307
1.66k
    Value *RHS = expandCodeFor(RHSScev, E->getType(), IP);
308
1.66k
309
1.66k
    Inst = BinaryOperator::Create((Instruction::BinaryOps)Inst->getOpcode(),
310
1.66k
                                  LHS, RHS, Inst->getName() + Name, IP);
311
1.66k
    return SE.getSCEV(Inst);
312
1.66k
  }
313
314
  /// The following functions will just traverse the SCEV and rebuild it with
315
  /// the new operands returned by the traversal.
316
  ///
317
  ///{
318
1.02k
  const SCEV *visitConstant(const SCEVConstant *E) { return E; }
319
95
  const SCEV *visitTruncateExpr(const SCEVTruncateExpr *E) {
320
95
    return SE.getTruncateExpr(visit(E->getOperand()), E->getType());
321
95
  }
322
20
  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *E) {
323
20
    return SE.getZeroExtendExpr(visit(E->getOperand()), E->getType());
324
20
  }
325
50
  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *E) {
326
50
    return SE.getSignExtendExpr(visit(E->getOperand()), E->getType());
327
50
  }
328
15
  const SCEV *visitUDivExpr(const SCEVUDivExpr *E) {
329
15
    auto *RHSScev = visit(E->getRHS());
330
15
    if (!SE.isKnownNonZero(RHSScev))
331
10
      RHSScev = SE.getUMaxExpr(RHSScev, SE.getConstant(E->getType(), 1));
332
15
    return SE.getUDivExpr(visit(E->getLHS()), RHSScev);
333
15
  }
334
581
  const SCEV *visitAddExpr(const SCEVAddExpr *E) {
335
581
    SmallVector<const SCEV *, 4> NewOps;
336
581
    for (const SCEV *Op : E->operands())
337
1.33k
      NewOps.push_back(visit(Op));
338
581
    return SE.getAddExpr(NewOps);
339
581
  }
340
511
  const SCEV *visitMulExpr(const SCEVMulExpr *E) {
341
511
    SmallVector<const SCEV *, 4> NewOps;
342
511
    for (const SCEV *Op : E->operands())
343
1.05k
      NewOps.push_back(visit(Op));
344
511
    return SE.getMulExpr(NewOps);
345
511
  }
346
3
  const SCEV *visitUMaxExpr(const SCEVUMaxExpr *E) {
347
3
    SmallVector<const SCEV *, 4> NewOps;
348
3
    for (const SCEV *Op : E->operands())
349
6
      NewOps.push_back(visit(Op));
350
3
    return SE.getUMaxExpr(NewOps);
351
3
  }
352
1
  const SCEV *visitSMaxExpr(const SCEVSMaxExpr *E) {
353
1
    SmallVector<const SCEV *, 4> NewOps;
354
1
    for (const SCEV *Op : E->operands())
355
2
      NewOps.push_back(visit(Op));
356
1
    return SE.getSMaxExpr(NewOps);
357
1
  }
358
35
  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *E) {
359
35
    SmallVector<const SCEV *, 4> NewOps;
360
35
    for (const SCEV *Op : E->operands())
361
70
      NewOps.push_back(visit(Op));
362
35
    return SE.getAddRecExpr(NewOps, E->getLoop(), E->getNoWrapFlags());
363
35
  }
364
  ///}
365
};
366
367
// Expand the SCEV expression @p E into a value of type @p Ty at insertion
// point @p IP, using ScopExpander so that values defined inside the SCoP's
// region are recomputed (not referenced) when code is generated outside it.
// @p VMap optionally remaps values; @p RTCBB is the runtime-check block used
// as a fallback insertion point.
Value *polly::expandCodeFor(Scop &S, ScalarEvolution &SE, const DataLayout &DL,
                            const char *Name, const SCEV *E, Type *Ty,
                            Instruction *IP, ValueMapT *VMap,
                            BasicBlock *RTCBB) {
  ScopExpander Expander(S.getRegion(), SE, DL, Name, VMap, RTCBB);
  return Expander.expandCodeFor(E, Ty, IP);
}
374
375
// Heuristically decide whether @p BB is an 'error block' inside region @p R:
// a block whose execution is assumed to be rare (e.g. it unconditionally
// reaches 'unreachable', or conditionally calls a function that may access
// memory or never return). Returns false when error-block speculation is
// disabled via -polly-allow-error-blocks.
bool polly::isErrorBlock(BasicBlock &BB, const Region &R, LoopInfo &LI,
                         const DominatorTree &DT) {
  if (!PollyAllowErrorBlocks)
    return false;

  if (isa<UnreachableInst>(BB.getTerminator()))
    return true;

  if (LI.isLoopHeader(&BB))
    return false;

  // Basic blocks that are always executed are not considered error blocks,
  // as their execution can not be a rare event.
  bool DominatesAllPredecessors = true;
  if (R.isTopLevelRegion()) {
    for (BasicBlock &I : *R.getEntry()->getParent())
      if (isa<ReturnInst>(I.getTerminator()) && !DT.dominates(&BB, &I))
        DominatesAllPredecessors = false;
  } else {
    for (auto Pred : predecessors(R.getExit()))
      if (R.contains(Pred) && !DT.dominates(&BB, Pred))
        DominatesAllPredecessors = false;
  }

  if (DominatesAllPredecessors)
    return false;

  // FIXME: This is a simple heuristic to determine if the load is executed
  //        in a conditional. However, we actually would need the control
  //        condition, i.e., the post dominance frontier. Alternatively we
  //        could walk up the dominance tree until we find a block that is
  //        not post dominated by the load and check if it is a conditional
  //        or a loop header.
  auto *DTNode = DT.getNode(&BB);
  if (!DTNode)
    return false;

  DTNode = DTNode->getIDom();

  if (!DTNode)
    return false;

  auto *IDomBB = DTNode->getBlock();
  if (LI.isLoopHeader(IDomBB))
    return false;

  for (Instruction &Inst : BB)
    if (CallInst *CI = dyn_cast<CallInst>(&Inst)) {
      if (isIgnoredIntrinsic(CI))
        continue;

      // memset, memcpy and memmove are modeled intrinsics.
      if (isa<MemSetInst>(CI) || isa<MemTransferInst>(CI))
        continue;

      // A call that may touch memory or never return marks the block as an
      // error block.
      if (!CI->doesNotAccessMemory())
        return true;
      if (CI->doesNotReturn())
        return true;
    }

  return false;
}
438
439
32.0k
// Return the value that decides which successor of @p TI is taken: a constant
// i1 'true' for unconditional branches, the branch or switch condition
// otherwise, and nullptr for terminators without a condition.
Value *polly::getConditionFromTerminator(TerminatorInst *TI) {
  if (BranchInst *BR = dyn_cast<BranchInst>(TI)) {
    if (BR->isUnconditional())
      return ConstantInt::getTrue(Type::getInt1Ty(TI->getContext()));

    return BR->getCondition();
  }

  if (SwitchInst *SI = dyn_cast<SwitchInst>(TI))
    return SI->getCondition();

  return nullptr;
}
452
453
bool polly::isHoistableLoad(LoadInst *LInst, Region &R, LoopInfo &LI,
454
2.98k
                            ScalarEvolution &SE, const DominatorTree &DT) {
455
2.98k
  Loop *L = LI.getLoopFor(LInst->getParent());
456
2.98k
  auto *Ptr = LInst->getPointerOperand();
457
2.98k
  const SCEV *PtrSCEV = SE.getSCEVAtScope(Ptr, L);
458
3.31k
  while (L && 
R.contains(L)844
) {
459
341
    if (!SE.isLoopInvariant(PtrSCEV, L))
460
17
      return false;
461
324
    L = L->getParentLoop();
462
324
  }
463
2.98k
464
21.4k
  
for (auto *User : Ptr->users()) 2.97k
{
465
21.4k
    auto *UserI = dyn_cast<Instruction>(User);
466
21.4k
    if (!UserI || !R.contains(UserI))
467
3.05k
      continue;
468
18.3k
    if (!UserI->mayWriteToMemory())
469
17.9k
      continue;
470
388
471
388
    auto &BB = *UserI->getParent();
472
388
    if (DT.dominates(&BB, LInst->getParent()))
473
121
      return false;
474
267
475
267
    bool DominatesAllPredecessors = true;
476
267
    for (auto Pred : predecessors(R.getExit()))
477
526
      if (R.contains(Pred) && !DT.dominates(&BB, Pred))
478
492
        DominatesAllPredecessors = false;
479
267
480
267
    if (!DominatesAllPredecessors)
481
267
      continue;
482
0
483
0
    return false;
484
0
  }
485
2.85k
486
2.85k
  return true;
487
2.85k
}
488
489
16.9k
// Return true if @p V is an intrinsic call that Polly can safely ignore
// during modeling (lifetime/invariant markers, annotations, assumes and
// debug-info intrinsics). All other values return false.
bool polly::isIgnoredIntrinsic(const Value *V) {
  if (auto *IT = dyn_cast<IntrinsicInst>(V)) {
    switch (IT->getIntrinsicID()) {
    // Lifetime markers are supported/ignored.
    case llvm::Intrinsic::lifetime_start:
    case llvm::Intrinsic::lifetime_end:
    // Invariant markers are supported/ignored.
    case llvm::Intrinsic::invariant_start:
    case llvm::Intrinsic::invariant_end:
    // Some misc annotations are supported/ignored.
    case llvm::Intrinsic::var_annotation:
    case llvm::Intrinsic::ptr_annotation:
    case llvm::Intrinsic::annotation:
    case llvm::Intrinsic::donothing:
    case llvm::Intrinsic::assume:
    // Some debug info intrinsics are supported/ignored.
    case llvm::Intrinsic::dbg_value:
    case llvm::Intrinsic::dbg_declare:
      return true;
    default:
      break;
    }
  }
  return false;
}
514
515
// Return true if the value @p V can be re-synthesized from a SCEV expression
// at loop scope @p Scope: V must be SCEVable, its SCEV at that scope must be
// computable, and the expression must have no scalar dependences inside the
// SCoP's region (the SCoP's required invariant loads are permitted).
bool polly::canSynthesize(const Value *V, const Scop &S, ScalarEvolution *SE,
                          Loop *Scope) {
  if (!V || !SE->isSCEVable(V->getType()))
    return false;

  const InvariantLoadsSetTy &ILS = S.getRequiredInvariantLoads();
  if (const SCEV *Scev = SE->getSCEVAtScope(const_cast<Value *>(V), Scope))
    if (!isa<SCEVCouldNotCompute>(Scev))
      if (!hasScalarDepsInsideRegion(Scev, &S.getRegion(), Scope, false, ILS))
        return true;

  return false;
}
528
529
18.6k
// Return the block in which the use @p U is "effective": for a PHI operand
// this is the corresponding incoming block (where the value must be
// available), otherwise the parent block of the user instruction. Returns
// nullptr if the user is not an instruction.
llvm::BasicBlock *polly::getUseBlock(const llvm::Use &U) {
  Instruction *UI = dyn_cast<Instruction>(U.getUser());
  if (!UI)
    return nullptr;

  if (PHINode *PHI = dyn_cast<PHINode>(UI))
    return PHI->getIncomingBlock(U);

  return UI->getParent();
}
539
540
// Derive multi-dimensional subscript expressions and (inner) dimension sizes
// from the type structure of a GEP. A leading zero index into the pointer
// dimension is dropped. If a non-array type is encountered mid-walk, both
// result vectors are returned empty (delinearization not possible).
std::tuple<std::vector<const SCEV *>, std::vector<int>>
polly::getIndexExpressionsFromGEP(GetElementPtrInst *GEP, ScalarEvolution &SE) {
  std::vector<const SCEV *> Subscripts;
  std::vector<int> Sizes;

  Type *Ty = GEP->getPointerOperandType();

  bool DroppedFirstDim = false;

  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {

    const SCEV *Expr = SE.getSCEV(GEP->getOperand(i));

    if (i == 1) {
      // First index steps over the pointer (or an array) itself.
      if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
        Ty = PtrTy->getElementType();
      } else if (auto *ArrayTy = dyn_cast<ArrayType>(Ty)) {
        Ty = ArrayTy->getElementType();
      } else {
        Subscripts.clear();
        Sizes.clear();
        break;
      }
      // A constant-zero first subscript carries no information; drop it.
      if (auto *Const = dyn_cast<SCEVConstant>(Expr))
        if (Const->getValue()->isZero()) {
          DroppedFirstDim = true;
          continue;
        }
      Subscripts.push_back(Expr);
      continue;
    }

    auto *ArrayTy = dyn_cast<ArrayType>(Ty);
    if (!ArrayTy) {
      Subscripts.clear();
      Sizes.clear();
      break;
    }

    Subscripts.push_back(Expr);
    // The outermost dimension size is omitted when the leading zero index
    // was dropped (it then corresponds to the unsized outer dimension).
    if (!(DroppedFirstDim && i == 2))
      Sizes.push_back(ArrayTy->getNumElements());

    Ty = ArrayTy->getElementType();
  }

  return std::make_tuple(Subscripts, Sizes);
}
588
589
// Walk up the loop tree from @p L until reaching the first loop that is not
// in @p BoxedLoops (loops overapproximated as non-affine subregions); may
// return nullptr when every enclosing loop is boxed.
llvm::Loop *polly::getFirstNonBoxedLoopFor(llvm::Loop *L, llvm::LoopInfo &LI,
                                           const BoxedLoopsSetTy &BoxedLoops) {
  while (BoxedLoops.count(L))
    L = L->getParentLoop();
  return L;
}
595
596
// Convenience overload: look up the innermost loop containing @p BB and
// forward to the loop-based overload to skip boxed loops.
llvm::Loop *polly::getFirstNonBoxedLoopFor(llvm::BasicBlock *BB,
                                           llvm::LoopInfo &LI,
                                           const BoxedLoopsSetTy &BoxedLoops) {
  Loop *L = LI.getLoopFor(BB);
  return getFirstNonBoxedLoopFor(L, LI, BoxedLoops);
}