Coverage Report

Created: 2017-10-03 07:32

/Users/buildslave/jenkins/sharedspace/clang-stage2-coverage-R@2/llvm/tools/polly/lib/Support/ScopHelper.cpp
Line
Count
Source (jump to first uncovered line)
//===- ScopHelper.cpp - Some Helper Functions for Scop.  ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Small functions that help with Scop and LLVM-IR.
//
//===----------------------------------------------------------------------===//
#include "polly/Support/ScopHelper.h"
#include "polly/Options.h"
#include "polly/ScopInfo.h"
#include "polly/Support/SCEVValidator.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include <utility>
using namespace llvm;
29
using namespace polly;
30
31
#define DEBUG_TYPE "polly-scop-helper"
32
33
static cl::opt<bool> PollyAllowErrorBlocks(
34
    "polly-allow-error-blocks",
35
    cl::desc("Allow to speculate on the execution of 'error blocks'."),
36
    cl::Hidden, cl::init(true), cl::ZeroOrMore, cl::cat(PollyCategory));
37
38
// Ensures that there is just one predecessor to the entry node from outside the
39
// region.
40
// The identity of the region entry node is preserved.
41
static void simplifyRegionEntry(Region *R, DominatorTree *DT, LoopInfo *LI,
42
288
                                RegionInfo *RI) {
43
288
  BasicBlock *EnteringBB = R->getEnteringBlock();
44
288
  BasicBlock *Entry = R->getEntry();
45
288
46
288
  // Before (one of):
47
288
  //
48
288
  //                       \    /            //
49
288
  //                      EnteringBB         //
50
288
  //                        |    \------>    //
51
288
  //   \   /                |                //
52
288
  //   Entry <--\         Entry <--\         //
53
288
  //   /   \    /         /   \    /         //
54
288
  //        ....               ....          //
55
288
56
288
  // Create single entry edge if the region has multiple entry edges.
57
288
  if (
!EnteringBB288
) {
58
6
    SmallVector<BasicBlock *, 4> Preds;
59
6
    for (BasicBlock *P : predecessors(Entry))
60
12
      
if (12
!R->contains(P)12
)
61
12
        Preds.push_back(P);
62
6
63
6
    BasicBlock *NewEntering =
64
6
        SplitBlockPredecessors(Entry, Preds, ".region_entering", DT, LI);
65
6
66
6
    if (
RI6
) {
67
6
      // The exit block of predecessing regions must be changed to NewEntering
68
12
      for (BasicBlock *ExitPred : predecessors(NewEntering)) {
69
12
        Region *RegionOfPred = RI->getRegionFor(ExitPred);
70
12
        if (RegionOfPred->getExit() != Entry)
71
11
          continue;
72
1
73
1
        
while (1
!RegionOfPred->isTopLevelRegion() &&
74
1
               
RegionOfPred->getExit() == Entry1
) {
75
1
          RegionOfPred->replaceExit(NewEntering);
76
1
          RegionOfPred = RegionOfPred->getParent();
77
1
        }
78
12
      }
79
6
80
6
      // Make all ancestors use EnteringBB as entry; there might be edges to it
81
6
      Region *AncestorR = R->getParent();
82
6
      RI->setRegionFor(NewEntering, AncestorR);
83
11
      while (
!AncestorR->isTopLevelRegion() && 11
AncestorR->getEntry() == Entry6
) {
84
5
        AncestorR->replaceEntry(NewEntering);
85
5
        AncestorR = AncestorR->getParent();
86
5
      }
87
6
    }
88
6
89
6
    EnteringBB = NewEntering;
90
6
  }
91
288
  assert(R->getEnteringBlock() == EnteringBB);
92
288
93
288
  // After:
94
288
  //
95
288
  //    \    /       //
96
288
  //  EnteringBB     //
97
288
  //      |          //
98
288
  //      |          //
99
288
  //    Entry <--\   //
100
288
  //    /   \    /   //
101
288
  //         ....    //
102
288
}
103
104
// Ensure that the region has a single block that branches to the exit node.
105
static void simplifyRegionExit(Region *R, DominatorTree *DT, LoopInfo *LI,
106
288
                               RegionInfo *RI) {
107
288
  BasicBlock *ExitBB = R->getExit();
108
288
  BasicBlock *ExitingBB = R->getExitingBlock();
109
288
110
288
  // Before:
111
288
  //
112
288
  //   (Region)   ______/  //
113
288
  //      \  |   /         //
114
288
  //       ExitBB          //
115
288
  //       /    \          //
116
288
117
288
  if (
!ExitingBB288
) {
118
60
    SmallVector<BasicBlock *, 4> Preds;
119
60
    for (BasicBlock *P : predecessors(ExitBB))
120
140
      
if (140
R->contains(P)140
)
121
124
        Preds.push_back(P);
122
60
123
60
    //  Preds[0] Preds[1]      otherBB //
124
60
    //         \  |  ________/         //
125
60
    //          \ | /                  //
126
60
    //           BB                    //
127
60
    ExitingBB =
128
60
        SplitBlockPredecessors(ExitBB, Preds, ".region_exiting", DT, LI);
129
60
    // Preds[0] Preds[1]      otherBB  //
130
60
    //        \  /           /         //
131
60
    // BB.region_exiting    /          //
132
60
    //                  \  /           //
133
60
    //                   BB            //
134
60
135
60
    if (RI)
136
60
      RI->setRegionFor(ExitingBB, R);
137
60
138
60
    // Change the exit of nested regions, but not the region itself,
139
60
    R->replaceExitRecursive(ExitingBB);
140
60
    R->replaceExit(ExitBB);
141
60
  }
142
288
  assert(ExitingBB == R->getExitingBlock());
143
288
144
288
  // After:
145
288
  //
146
288
  //     \   /                //
147
288
  //    ExitingBB     _____/  //
148
288
  //          \      /        //
149
288
  //           ExitBB         //
150
288
  //           /    \         //
151
288
}
152
153
void polly::simplifyRegion(Region *R, DominatorTree *DT, LoopInfo *LI,
154
288
                           RegionInfo *RI) {
155
288
  assert(R && !R->isTopLevelRegion());
156
288
  assert(!RI || RI == R->getRegionInfo());
157
288
  assert((!RI || DT) &&
158
288
         "RegionInfo requires DominatorTree to be updated as well");
159
288
160
288
  simplifyRegionEntry(R, DT, LI, RI);
161
288
  simplifyRegionExit(R, DT, LI, RI);
162
288
  assert(R->isSimple());
163
288
}
164
165
// Split the block into two successive blocks.
166
//
167
// Like llvm::SplitBlock, but also preserves RegionInfo
168
static BasicBlock *splitBlock(BasicBlock *Old, Instruction *SplitPt,
169
                              DominatorTree *DT, llvm::LoopInfo *LI,
170
13
                              RegionInfo *RI) {
171
13
  assert(Old && SplitPt);
172
13
173
13
  // Before:
174
13
  //
175
13
  //  \   /  //
176
13
  //   Old   //
177
13
  //  /   \  //
178
13
179
13
  BasicBlock *NewBlock = llvm::SplitBlock(Old, SplitPt, DT, LI);
180
13
181
13
  if (
RI13
) {
182
0
    Region *R = RI->getRegionFor(Old);
183
0
    RI->setRegionFor(NewBlock, R);
184
0
  }
185
13
186
13
  // After:
187
13
  //
188
13
  //   \   /    //
189
13
  //    Old     //
190
13
  //     |      //
191
13
  //  NewBlock  //
192
13
  //   /   \    //
193
13
194
13
  return NewBlock;
195
13
}
196
197
void polly::splitEntryBlockForAlloca(BasicBlock *EntryBlock, DominatorTree *DT,
198
13
                                     LoopInfo *LI, RegionInfo *RI) {
199
13
  // Find first non-alloca instruction. Every basic block has a non-alloca
200
13
  // instruction, as every well formed basic block has a terminator.
201
13
  BasicBlock::iterator I = EntryBlock->begin();
202
13
  while (isa<AllocaInst>(I))
203
0
    ++I;
204
13
205
13
  // splitBlock updates DT, LI and RI.
206
13
  splitBlock(EntryBlock, &*I, DT, LI, RI);
207
13
}
208
209
13
void polly::splitEntryBlockForAlloca(BasicBlock *EntryBlock, Pass *P) {
210
13
  auto *DTWP = P->getAnalysisIfAvailable<DominatorTreeWrapperPass>();
211
13
  auto *DT = DTWP ? 
&DTWP->getDomTree()13
:
nullptr0
;
212
13
  auto *LIWP = P->getAnalysisIfAvailable<LoopInfoWrapperPass>();
213
13
  auto *LI = LIWP ? 
&LIWP->getLoopInfo()13
:
nullptr0
;
214
13
  RegionInfoPass *RIP = P->getAnalysisIfAvailable<RegionInfoPass>();
215
13
  RegionInfo *RI = RIP ? 
&RIP->getRegionInfo()0
:
nullptr13
;
216
13
217
13
  // splitBlock updates DT, LI and RI.
218
13
  polly::splitEntryBlockForAlloca(EntryBlock, DT, LI, RI);
219
13
}
220
221
/// The SCEVExpander will __not__ generate any code for an existing SDiv/SRem
222
/// instruction but just use it, if it is referenced as a SCEVUnknown. We want
223
/// however to generate new code if the instruction is in the analyzed region
224
/// and we generate code outside/in front of that region. Hence, we generate the
225
/// code for the SDiv/SRem operands in front of the analyzed region and then
226
/// create a new SDiv/SRem operation there too.
227
struct ScopExpander : SCEVVisitor<ScopExpander, const SCEV *> {
228
  friend struct SCEVVisitor<ScopExpander, const SCEV *>;
229
230
  explicit ScopExpander(const Region &R, ScalarEvolution &SE,
231
                        const DataLayout &DL, const char *Name, ValueMapT *VMap,
232
                        BasicBlock *RTCBB)
233
      : Expander(SCEVExpander(SE, DL, Name)), SE(SE), Name(Name), R(R),
234
1.24k
        VMap(VMap), RTCBB(RTCBB) {}
235
236
1.25k
  Value *expandCodeFor(const SCEV *E, Type *Ty, Instruction *I) {
237
1.25k
    // If we generate code in the region we will immediately fall back to the
238
1.25k
    // SCEVExpander, otherwise we will stop at all unknowns in the SCEV and if
239
1.25k
    // needed replace them by copies computed in the entering block.
240
1.25k
    if (!R.contains(I))
241
1.25k
      E = visit(E);
242
1.25k
    return Expander.expandCodeFor(E, Ty, I);
243
1.25k
  }
244
245
private:
246
  SCEVExpander Expander;
247
  ScalarEvolution &SE;
248
  const char *Name;
249
  const Region &R;
250
  ValueMapT *VMap;
251
  BasicBlock *RTCBB;
252
253
  const SCEV *visitGenericInst(const SCEVUnknown *E, Instruction *Inst,
254
1.58k
                               Instruction *IP) {
255
1.58k
    if (
!Inst || 1.58k
!R.contains(Inst)688
)
256
1.58k
      return E;
257
2
258
1.58k
    assert(!Inst->mayThrow() && !Inst->mayReadOrWriteMemory() &&
259
2
           !isa<PHINode>(Inst));
260
2
261
2
    auto *InstClone = Inst->clone();
262
2
    for (auto &Op : Inst->operands()) {
263
2
      assert(SE.isSCEVable(Op->getType()));
264
2
      auto *OpSCEV = SE.getSCEV(Op);
265
2
      auto *OpClone = expandCodeFor(OpSCEV, Op->getType(), IP);
266
2
      InstClone->replaceUsesOfWith(Op, OpClone);
267
2
    }
268
1.58k
269
1.58k
    InstClone->setName(Name + Inst->getName());
270
1.58k
    InstClone->insertBefore(IP);
271
1.58k
    return SE.getSCEV(InstClone);
272
1.58k
  }
273
274
1.65k
  const SCEV *visitUnknown(const SCEVUnknown *E) {
275
1.65k
276
1.65k
    // If a value mapping was given try if the underlying value is remapped.
277
1.65k
    Value *NewVal = VMap ? 
VMap->lookup(E->getValue())1.63k
:
nullptr16
;
278
1.65k
    if (
NewVal1.65k
) {
279
69
      auto *NewE = SE.getSCEV(NewVal);
280
69
281
69
      // While the mapped value might be different the SCEV representation might
282
69
      // not be. To this end we will check before we go into recursion here.
283
69
      if (E != NewE)
284
66
        return visit(NewE);
285
1.58k
    }
286
1.58k
287
1.58k
    Instruction *Inst = dyn_cast<Instruction>(E->getValue());
288
1.58k
    Instruction *IP;
289
1.58k
    if (
Inst && 1.58k
!R.contains(Inst)691
)
290
689
      IP = Inst;
291
897
    else 
if (897
Inst && 897
RTCBB->getParent() == Inst->getFunction()2
)
292
2
      IP = RTCBB->getTerminator();
293
897
    else
294
895
      IP = RTCBB->getParent()->getEntryBlock().getTerminator();
295
1.58k
296
1.58k
    if (
!Inst || 1.58k
(Inst->getOpcode() != Instruction::SRem &&
297
691
                  Inst->getOpcode() != Instruction::SDiv))
298
1.58k
      return visitGenericInst(E, Inst, IP);
299
3
300
3
    const SCEV *LHSScev = SE.getSCEV(Inst->getOperand(0));
301
3
    const SCEV *RHSScev = SE.getSCEV(Inst->getOperand(1));
302
3
303
3
    if (!SE.isKnownNonZero(RHSScev))
304
1
      RHSScev = SE.getUMaxExpr(RHSScev, SE.getConstant(E->getType(), 1));
305
1.65k
306
1.65k
    Value *LHS = expandCodeFor(LHSScev, E->getType(), IP);
307
1.65k
    Value *RHS = expandCodeFor(RHSScev, E->getType(), IP);
308
1.65k
309
1.65k
    Inst = BinaryOperator::Create((Instruction::BinaryOps)Inst->getOpcode(),
310
1.65k
                                  LHS, RHS, Inst->getName() + Name, IP);
311
1.65k
    return SE.getSCEV(Inst);
312
1.65k
  }
313
314
  /// The following functions will just traverse the SCEV and rebuild it with
315
  /// the new operands returned by the traversal.
316
  ///
317
  ///{
318
1.02k
  const SCEV *visitConstant(const SCEVConstant *E) { return E; }
319
94
  const SCEV *visitTruncateExpr(const SCEVTruncateExpr *E) {
320
94
    return SE.getTruncateExpr(visit(E->getOperand()), E->getType());
321
94
  }
322
18
  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *E) {
323
18
    return SE.getZeroExtendExpr(visit(E->getOperand()), E->getType());
324
18
  }
325
50
  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *E) {
326
50
    return SE.getSignExtendExpr(visit(E->getOperand()), E->getType());
327
50
  }
328
13
  const SCEV *visitUDivExpr(const SCEVUDivExpr *E) {
329
13
    auto *RHSScev = visit(E->getRHS());
330
13
    if (!SE.isKnownNonZero(RHSScev))
331
8
      RHSScev = SE.getUMaxExpr(RHSScev, SE.getConstant(E->getType(), 1));
332
13
    return SE.getUDivExpr(visit(E->getLHS()), RHSScev);
333
13
  }
334
580
  const SCEV *visitAddExpr(const SCEVAddExpr *E) {
335
580
    SmallVector<const SCEV *, 4> NewOps;
336
580
    for (const SCEV *Op : E->operands())
337
1.33k
      NewOps.push_back(visit(Op));
338
580
    return SE.getAddExpr(NewOps);
339
580
  }
340
511
  const SCEV *visitMulExpr(const SCEVMulExpr *E) {
341
511
    SmallVector<const SCEV *, 4> NewOps;
342
511
    for (const SCEV *Op : E->operands())
343
1.05k
      NewOps.push_back(visit(Op));
344
511
    return SE.getMulExpr(NewOps);
345
511
  }
346
3
  const SCEV *visitUMaxExpr(const SCEVUMaxExpr *E) {
347
3
    SmallVector<const SCEV *, 4> NewOps;
348
3
    for (const SCEV *Op : E->operands())
349
6
      NewOps.push_back(visit(Op));
350
3
    return SE.getUMaxExpr(NewOps);
351
3
  }
352
1
  const SCEV *visitSMaxExpr(const SCEVSMaxExpr *E) {
353
1
    SmallVector<const SCEV *, 4> NewOps;
354
1
    for (const SCEV *Op : E->operands())
355
2
      NewOps.push_back(visit(Op));
356
1
    return SE.getSMaxExpr(NewOps);
357
1
  }
358
34
  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *E) {
359
34
    SmallVector<const SCEV *, 4> NewOps;
360
34
    for (const SCEV *Op : E->operands())
361
68
      NewOps.push_back(visit(Op));
362
34
    return SE.getAddRecExpr(NewOps, E->getLoop(), E->getNoWrapFlags());
363
34
  }
364
  ///}
365
};
366
367
Value *polly::expandCodeFor(Scop &S, ScalarEvolution &SE, const DataLayout &DL,
368
                            const char *Name, const SCEV *E, Type *Ty,
369
                            Instruction *IP, ValueMapT *VMap,
370
1.24k
                            BasicBlock *RTCBB) {
371
1.24k
  ScopExpander Expander(S.getRegion(), SE, DL, Name, VMap, RTCBB);
372
1.24k
  return Expander.expandCodeFor(E, Ty, IP);
373
1.24k
}
374
375
bool polly::isErrorBlock(BasicBlock &BB, const Region &R, LoopInfo &LI,
376
92.6k
                         const DominatorTree &DT) {
377
92.6k
  if (!PollyAllowErrorBlocks)
378
0
    return false;
379
92.6k
380
92.6k
  
if (92.6k
isa<UnreachableInst>(BB.getTerminator())92.6k
)
381
0
    return true;
382
92.6k
383
92.6k
  
if (92.6k
LI.isLoopHeader(&BB)92.6k
)
384
35.4k
    return false;
385
57.1k
386
57.1k
  // Basic blocks that are always executed are not considered error blocks,
387
57.1k
  // as their execution can not be a rare event.
388
57.1k
  bool DominatesAllPredecessors = true;
389
57.1k
  if (
R.isTopLevelRegion()57.1k
) {
390
101
    for (BasicBlock &I : *R.getEntry()->getParent())
391
550
      
if (550
isa<ReturnInst>(I.getTerminator()) && 550
!DT.dominates(&BB, &I)101
)
392
12
        DominatesAllPredecessors = false;
393
57.1k
  } else {
394
57.0k
    for (auto Pred : predecessors(R.getExit()))
395
89.4k
      
if (89.4k
R.contains(Pred) && 89.4k
!DT.dominates(&BB, Pred)84.1k
)
396
41.8k
        DominatesAllPredecessors = false;
397
57.0k
  }
398
57.1k
399
57.1k
  if (DominatesAllPredecessors)
400
20.9k
    return false;
401
36.2k
402
36.2k
  // FIXME: This is a simple heuristic to determine if the load is executed
403
36.2k
  //        in a conditional. However, we actually would need the control
404
36.2k
  //        condition, i.e., the post dominance frontier. Alternatively we
405
36.2k
  //        could walk up the dominance tree until we find a block that is
406
36.2k
  //        not post dominated by the load and check if it is a conditional
407
36.2k
  //        or a loop header.
408
36.2k
  auto *DTNode = DT.getNode(&BB);
409
36.2k
  if (!DTNode)
410
1
    return false;
411
36.2k
412
36.2k
  DTNode = DTNode->getIDom();
413
36.2k
414
36.2k
  if (!DTNode)
415
0
    return false;
416
36.2k
417
36.2k
  auto *IDomBB = DTNode->getBlock();
418
36.2k
  if (LI.isLoopHeader(IDomBB))
419
18.3k
    return false;
420
17.8k
421
17.8k
  for (Instruction &Inst : BB)
422
55.9k
    
if (CallInst *55.9k
CI55.9k
= dyn_cast<CallInst>(&Inst)) {
423
380
      if (isIgnoredIntrinsic(CI))
424
61
        continue;
425
319
426
319
      // memset, memcpy and memmove are modeled intrinsics.
427
319
      
if (319
isa<MemSetInst>(CI) || 319
isa<MemTransferInst>(CI)301
)
428
18
        continue;
429
301
430
301
      
if (301
!CI->doesNotAccessMemory()301
)
431
281
        return true;
432
20
      
if (20
CI->doesNotReturn()20
)
433
0
        return true;
434
17.5k
    }
435
17.5k
436
17.5k
  return false;
437
17.5k
}
438
439
31.2k
Value *polly::getConditionFromTerminator(TerminatorInst *TI) {
440
31.2k
  if (BranchInst *
BR31.2k
= dyn_cast<BranchInst>(TI)) {
441
31.2k
    if (BR->isUnconditional())
442
14.2k
      return ConstantInt::getTrue(Type::getInt1Ty(TI->getContext()));
443
16.9k
444
16.9k
    return BR->getCondition();
445
16.9k
  }
446
66
447
66
  
if (SwitchInst *66
SI66
= dyn_cast<SwitchInst>(TI))
448
66
    return SI->getCondition();
449
0
450
0
  return nullptr;
451
0
}
452
453
bool polly::isHoistableLoad(LoadInst *LInst, Region &R, LoopInfo &LI,
454
2.98k
                            ScalarEvolution &SE, const DominatorTree &DT) {
455
2.98k
  Loop *L = LI.getLoopFor(LInst->getParent());
456
2.98k
  auto *Ptr = LInst->getPointerOperand();
457
2.98k
  const SCEV *PtrSCEV = SE.getSCEVAtScope(Ptr, L);
458
3.31k
  while (
L && 3.31k
R.contains(L)844
) {
459
341
    if (!SE.isLoopInvariant(PtrSCEV, L))
460
17
      return false;
461
324
    L = L->getParentLoop();
462
324
  }
463
2.98k
464
2.97k
  
for (auto *User : Ptr->users()) 2.97k
{
465
21.4k
    auto *UserI = dyn_cast<Instruction>(User);
466
21.4k
    if (
!UserI || 21.4k
!R.contains(UserI)21.4k
)
467
3.05k
      continue;
468
18.3k
    
if (18.3k
!UserI->mayWriteToMemory()18.3k
)
469
17.9k
      continue;
470
388
471
388
    auto &BB = *UserI->getParent();
472
388
    if (DT.dominates(&BB, LInst->getParent()))
473
121
      return false;
474
267
475
267
    bool DominatesAllPredecessors = true;
476
267
    for (auto Pred : predecessors(R.getExit()))
477
526
      
if (526
R.contains(Pred) && 526
!DT.dominates(&BB, Pred)526
)
478
492
        DominatesAllPredecessors = false;
479
267
480
267
    if (!DominatesAllPredecessors)
481
267
      continue;
482
0
483
0
    return false;
484
0
  }
485
2.85k
486
2.85k
  return true;
487
2.85k
}
488
489
37.0k
bool polly::isIgnoredIntrinsic(const Value *V) {
490
37.0k
  if (auto *
IT37.0k
= dyn_cast<IntrinsicInst>(V)) {
491
292
    switch (IT->getIntrinsicID()) {
492
292
    // Lifetime markers are supported/ignored.
493
160
    case llvm::Intrinsic::lifetime_start:
494
160
    case llvm::Intrinsic::lifetime_end:
495
160
    // Invariant markers are supported/ignored.
496
160
    case llvm::Intrinsic::invariant_start:
497
160
    case llvm::Intrinsic::invariant_end:
498
160
    // Some misc annotations are supported/ignored.
499
160
    case llvm::Intrinsic::var_annotation:
500
160
    case llvm::Intrinsic::ptr_annotation:
501
160
    case llvm::Intrinsic::annotation:
502
160
    case llvm::Intrinsic::donothing:
503
160
    case llvm::Intrinsic::assume:
504
160
    // Some debug info intrinsics are supported/ignored.
505
160
    case llvm::Intrinsic::dbg_value:
506
160
    case llvm::Intrinsic::dbg_declare:
507
160
      return true;
508
132
    default:
509
132
      break;
510
36.9k
    }
511
36.9k
  }
512
36.9k
  return false;
513
36.9k
}
514
515
bool polly::canSynthesize(const Value *V, const Scop &S, ScalarEvolution *SE,
516
37.8k
                          Loop *Scope) {
517
37.8k
  if (
!V || 37.8k
!SE->isSCEVable(V->getType())37.8k
)
518
4.16k
    return false;
519
33.7k
520
33.7k
  const InvariantLoadsSetTy &ILS = S.getRequiredInvariantLoads();
521
33.7k
  if (const SCEV *Scev = SE->getSCEVAtScope(const_cast<Value *>(V), Scope))
522
33.7k
    
if (33.7k
!isa<SCEVCouldNotCompute>(Scev)33.7k
)
523
33.7k
      
if (33.7k
!hasScalarDepsInsideRegion(Scev, &S.getRegion(), Scope, false, ILS)33.7k
)
524
26.0k
        return true;
525
7.63k
526
7.63k
  return false;
527
7.63k
}
528
529
18.3k
llvm::BasicBlock *polly::getUseBlock(const llvm::Use &U) {
530
18.3k
  Instruction *UI = dyn_cast<Instruction>(U.getUser());
531
18.3k
  if (!UI)
532
0
    return nullptr;
533
18.3k
534
18.3k
  
if (PHINode *18.3k
PHI18.3k
= dyn_cast<PHINode>(UI))
535
1.92k
    return PHI->getIncomingBlock(U);
536
16.4k
537
16.4k
  return UI->getParent();
538
16.4k
}
539
540
std::tuple<std::vector<const SCEV *>, std::vector<int>>
541
2.66k
polly::getIndexExpressionsFromGEP(GetElementPtrInst *GEP, ScalarEvolution &SE) {
542
2.66k
  std::vector<const SCEV *> Subscripts;
543
2.66k
  std::vector<int> Sizes;
544
2.66k
545
2.66k
  Type *Ty = GEP->getPointerOperandType();
546
2.66k
547
2.66k
  bool DroppedFirstDim = false;
548
2.66k
549
6.00k
  for (unsigned i = 1; 
i < GEP->getNumOperands()6.00k
;
i++3.33k
) {
550
3.43k
551
3.43k
    const SCEV *Expr = SE.getSCEV(GEP->getOperand(i));
552
3.43k
553
3.43k
    if (
i == 13.43k
) {
554
2.66k
      if (auto *
PtrTy2.66k
= dyn_cast<PointerType>(Ty)) {
555
2.66k
        Ty = PtrTy->getElementType();
556
2.66k
      } else 
if (auto *0
ArrayTy0
= dyn_cast<ArrayType>(Ty)) {
557
0
        Ty = ArrayTy->getElementType();
558
0
      } else {
559
0
        Subscripts.clear();
560
0
        Sizes.clear();
561
0
        break;
562
0
      }
563
2.66k
      
if (auto *2.66k
Const2.66k
= dyn_cast<SCEVConstant>(Expr))
564
772
        
if (772
Const->getValue()->isZero()772
) {
565
561
          DroppedFirstDim = true;
566
561
          continue;
567
561
        }
568
2.10k
      Subscripts.push_back(Expr);
569
2.10k
      continue;
570
2.10k
    }
571
772
572
772
    auto *ArrayTy = dyn_cast<ArrayType>(Ty);
573
772
    if (
!ArrayTy772
) {
574
105
      Subscripts.clear();
575
105
      Sizes.clear();
576
105
      break;
577
105
    }
578
667
579
667
    Subscripts.push_back(Expr);
580
667
    if (
!(DroppedFirstDim && 667
i == 2478
))
581
257
      Sizes.push_back(ArrayTy->getNumElements());
582
3.43k
583
3.43k
    Ty = ArrayTy->getElementType();
584
3.43k
  }
585
2.66k
586
2.66k
  return std::make_tuple(Subscripts, Sizes);
587
2.66k
}
588
589
llvm::Loop *polly::getFirstNonBoxedLoopFor(llvm::Loop *L, llvm::LoopInfo &LI,
590
13.8k
                                           const BoxedLoopsSetTy &BoxedLoops) {
591
13.9k
  while (BoxedLoops.count(L))
592
46
    L = L->getParentLoop();
593
13.8k
  return L;
594
13.8k
}
595
596
llvm::Loop *polly::getFirstNonBoxedLoopFor(llvm::BasicBlock *BB,
597
                                           llvm::LoopInfo &LI,
598
13.8k
                                           const BoxedLoopsSetTy &BoxedLoops) {
599
13.8k
  Loop *L = LI.getLoopFor(BB);
600
13.8k
  return getFirstNonBoxedLoopFor(L, LI, BoxedLoops);
601
13.8k
}