Coverage Report

Created: 2018-02-20 23:11

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/tools/polly/lib/Support/ScopHelper.cpp
Line
Count
Source (jump to first uncovered line)
1
//===- ScopHelper.cpp - Some Helper Functions for Scop.  ------------------===//
2
//
3
//                     The LLVM Compiler Infrastructure
4
//
5
// This file is distributed under the University of Illinois Open Source
6
// License. See LICENSE.TXT for details.
7
//
8
//===----------------------------------------------------------------------===//
9
//
10
// Small functions that help with Scop and LLVM-IR.
11
//
12
//===----------------------------------------------------------------------===//
13
14
#include "polly/Support/ScopHelper.h"
15
#include "polly/Options.h"
16
#include "polly/ScopInfo.h"
17
#include "polly/Support/SCEVValidator.h"
18
#include "llvm/Analysis/LoopInfo.h"
19
#include "llvm/Analysis/RegionInfo.h"
20
#include "llvm/Analysis/ScalarEvolution.h"
21
#include "llvm/Analysis/ScalarEvolutionExpander.h"
22
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
23
#include "llvm/IR/CFG.h"
24
#include "llvm/IR/IntrinsicInst.h"
25
#include "llvm/Support/Debug.h"
26
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
27
28
using namespace llvm;
29
using namespace polly;
30
31
#define DEBUG_TYPE "polly-scop-helper"
32
33
// Command-line knob (default: true, hidden) that lets Polly speculate on the
// execution of 'error blocks'; isErrorBlock() below returns false for every
// block when this is disabled.
static cl::opt<bool> PollyAllowErrorBlocks(
    "polly-allow-error-blocks",
    cl::desc("Allow to speculate on the execution of 'error blocks'."),
    cl::Hidden, cl::init(true), cl::ZeroOrMore, cl::cat(PollyCategory));
38
// Ensures that there is just one predecessor to the entry node from outside the
// region.
// The identity of the region entry node is preserved.
static void simplifyRegionEntry(Region *R, DominatorTree *DT, LoopInfo *LI,
                                RegionInfo *RI) {
  BasicBlock *EnteringBB = R->getEnteringBlock();
  BasicBlock *Entry = R->getEntry();

  // Before (one of):
  //
  //                       \    /            //
  //                      EnteringBB         //
  //                        |    \------>    //
  //   \   /                |                //
  //   Entry <--\         Entry <--\         //
  //   /   \    /         /   \    /         //
  //        ....               ....          //

  // Create single entry edge if the region has multiple entry edges.
  if (!EnteringBB) {
    // Collect the predecessors of Entry that live outside the region; edges
    // from inside the region (back edges) must keep targeting Entry itself.
    SmallVector<BasicBlock *, 4> Preds;
    for (BasicBlock *P : predecessors(Entry))
      if (!R->contains(P))
        Preds.push_back(P);

    BasicBlock *NewEntering =
        SplitBlockPredecessors(Entry, Preds, ".region_entering", DT, LI);

    if (RI) {
      // The exit block of predecessing regions must be changed to NewEntering
      for (BasicBlock *ExitPred : predecessors(NewEntering)) {
        Region *RegionOfPred = RI->getRegionFor(ExitPred);
        if (RegionOfPred->getExit() != Entry)
          continue;

        // Walk up the region tree while the ancestors also exit at Entry.
        while (!RegionOfPred->isTopLevelRegion() &&
               RegionOfPred->getExit() == Entry) {
          RegionOfPred->replaceExit(NewEntering);
          RegionOfPred = RegionOfPred->getParent();
        }
      }

      // Make all ancestors use EnteringBB as entry; there might be edges to it
      Region *AncestorR = R->getParent();
      RI->setRegionFor(NewEntering, AncestorR);
      while (!AncestorR->isTopLevelRegion() &&
             AncestorR->getEntry() == Entry) {
        AncestorR->replaceEntry(NewEntering);
        AncestorR = AncestorR->getParent();
      }
    }

    EnteringBB = NewEntering;
  }
  assert(R->getEnteringBlock() == EnteringBB);

  // After:
  //
  //    \    /       //
  //  EnteringBB     //
  //      |          //
  //      |          //
  //    Entry <--\   //
  //    /   \    /   //
  //         ....    //
}
103
104
// Ensure that the region has a single block that branches to the exit node.
static void simplifyRegionExit(Region *R, DominatorTree *DT, LoopInfo *LI,
                               RegionInfo *RI) {
  BasicBlock *ExitBB = R->getExit();
  BasicBlock *ExitingBB = R->getExitingBlock();

  // Before:
  //
  //   (Region)   ______/  //
  //      \  |   /         //
  //       ExitBB          //
  //       /    \          //

  if (!ExitingBB) {
    // Collect only the predecessors of the exit block that are inside the
    // region; edges reaching ExitBB from elsewhere stay untouched.
    SmallVector<BasicBlock *, 4> Preds;
    for (BasicBlock *P : predecessors(ExitBB))
      if (R->contains(P))
        Preds.push_back(P);

    //  Preds[0] Preds[1]      otherBB //
    //         \  |  ________/         //
    //          \ | /                  //
    //           BB                    //
    ExitingBB =
        SplitBlockPredecessors(ExitBB, Preds, ".region_exiting", DT, LI);
    // Preds[0] Preds[1]      otherBB  //
    //        \  /           /         //
    // BB.region_exiting    /          //
    //                  \  /           //
    //                   BB            //

    if (RI)
      RI->setRegionFor(ExitingBB, R);

    // Change the exit of nested regions, but not the region itself,
    // i.e. recursively retarget exits to the new exiting block and then
    // restore this region's own exit to the original ExitBB.
    R->replaceExitRecursive(ExitingBB);
    R->replaceExit(ExitBB);
  }
  assert(ExitingBB == R->getExitingBlock());

  // After:
  //
  //     \   /                //
  //    ExitingBB     _____/  //
  //          \      /        //
  //           ExitBB         //
  //           /    \         //
}
152
153
// Canonicalize a region so it has exactly one entering edge and one exiting
// block (a "simple" region). DT/LI/RI are updated when non-null; passing RI
// without DT is rejected by the assertion below.
void polly::simplifyRegion(Region *R, DominatorTree *DT, LoopInfo *LI,
                           RegionInfo *RI) {
  assert(R && !R->isTopLevelRegion());
  assert(!RI || RI == R->getRegionInfo());
  assert((!RI || DT) &&
         "RegionInfo requires DominatorTree to be updated as well");

  // Entry first, then exit; both helpers assert their own postconditions.
  simplifyRegionEntry(R, DT, LI, RI);
  simplifyRegionExit(R, DT, LI, RI);
  assert(R->isSimple());
}
164
165
// Split the block into two successive blocks.
166
//
167
// Like llvm::SplitBlock, but also preserves RegionInfo
168
static BasicBlock *splitBlock(BasicBlock *Old, Instruction *SplitPt,
169
                              DominatorTree *DT, llvm::LoopInfo *LI,
170
13
                              RegionInfo *RI) {
171
13
  assert(Old && SplitPt);
172
13
173
13
  // Before:
174
13
  //
175
13
  //  \   /  //
176
13
  //   Old   //
177
13
  //  /   \  //
178
13
179
13
  BasicBlock *NewBlock = llvm::SplitBlock(Old, SplitPt, DT, LI);
180
13
181
13
  if (RI) {
182
0
    Region *R = RI->getRegionFor(Old);
183
0
    RI->setRegionFor(NewBlock, R);
184
0
  }
185
13
186
13
  // After:
187
13
  //
188
13
  //   \   /    //
189
13
  //    Old     //
190
13
  //     |      //
191
13
  //  NewBlock  //
192
13
  //   /   \    //
193
13
194
13
  return NewBlock;
195
13
}
196
197
void polly::splitEntryBlockForAlloca(BasicBlock *EntryBlock, DominatorTree *DT,
198
13
                                     LoopInfo *LI, RegionInfo *RI) {
199
13
  // Find first non-alloca instruction. Every basic block has a non-alloca
200
13
  // instruction, as every well formed basic block has a terminator.
201
13
  BasicBlock::iterator I = EntryBlock->begin();
202
13
  while (isa<AllocaInst>(I))
203
0
    ++I;
204
13
205
13
  // splitBlock updates DT, LI and RI.
206
13
  splitBlock(EntryBlock, &*I, DT, LI, RI);
207
13
}
208
209
13
// Pass-based convenience overload: gather whichever analyses the pass has
// already computed (each may be absent) and forward to the explicit overload.
void polly::splitEntryBlockForAlloca(BasicBlock *EntryBlock, Pass *P) {
  DominatorTree *DT = nullptr;
  if (auto *DTWP = P->getAnalysisIfAvailable<DominatorTreeWrapperPass>())
    DT = &DTWP->getDomTree();

  LoopInfo *LI = nullptr;
  if (auto *LIWP = P->getAnalysisIfAvailable<LoopInfoWrapperPass>())
    LI = &LIWP->getLoopInfo();

  RegionInfo *RI = nullptr;
  if (auto *RIP = P->getAnalysisIfAvailable<RegionInfoPass>())
    RI = &RIP->getRegionInfo();

  // splitBlock updates DT, LI and RI.
  polly::splitEntryBlockForAlloca(EntryBlock, DT, LI, RI);
}
220
221
/// The SCEVExpander will __not__ generate any code for an existing SDiv/SRem
/// instruction but just use it, if it is referenced as a SCEVUnknown. We want
/// however to generate new code if the instruction is in the analyzed region
/// and we generate code outside/in front of that region. Hence, we generate the
/// code for the SDiv/SRem operands in front of the analyzed region and then
/// create a new SDiv/SRem operation there too.
struct ScopExpander : SCEVVisitor<ScopExpander, const SCEV *> {
  friend struct SCEVVisitor<ScopExpander, const SCEV *>;

  explicit ScopExpander(const Region &R, ScalarEvolution &SE,
                        const DataLayout &DL, const char *Name, ValueMapT *VMap,
                        BasicBlock *RTCBB)
      : Expander(SCEVExpander(SE, DL, Name)), SE(SE), Name(Name), R(R),
        VMap(VMap), RTCBB(RTCBB) {}

  /// Expand @p E to a value of type @p Ty at insertion point @p I.
  Value *expandCodeFor(const SCEV *E, Type *Ty, Instruction *I) {
    // If we generate code in the region we will immediately fall back to the
    // SCEVExpander, otherwise we will stop at all unknowns in the SCEV and if
    // needed replace them by copies computed in the entering block.
    if (!R.contains(I))
      E = visit(E);
    return Expander.expandCodeFor(E, Ty, I);
  }

private:
  SCEVExpander Expander;      // Underlying expander that emits the IR.
  ScalarEvolution &SE;
  const char *Name;           // Prefix used for names of cloned instructions.
  const Region &R;            // The analyzed region we expand in front of.
  ValueMapT *VMap;            // Optional remapping of values (may be null).
  BasicBlock *RTCBB;          // Block hosting the run-time-check code.

  /// Clone an in-region instruction (non-SDiv/SRem case) in front of the
  /// region, recursively expanding its operands first.
  const SCEV *visitGenericInst(const SCEVUnknown *E, Instruction *Inst,
                               Instruction *IP) {
    // Values defined outside the region can be referenced directly.
    if (!Inst || !R.contains(Inst))
      return E;

    assert(!Inst->mayThrow() && !Inst->mayReadOrWriteMemory() &&
           !isa<PHINode>(Inst));

    auto *InstClone = Inst->clone();
    for (auto &Op : Inst->operands()) {
      assert(SE.isSCEVable(Op->getType()));
      auto *OpSCEV = SE.getSCEV(Op);
      auto *OpClone = expandCodeFor(OpSCEV, Op->getType(), IP);
      InstClone->replaceUsesOfWith(Op, OpClone);
    }

    InstClone->setName(Name + Inst->getName());
    InstClone->insertBefore(IP);
    return SE.getSCEV(InstClone);
  }

  const SCEV *visitUnknown(const SCEVUnknown *E) {

    // If a value mapping was given try if the underlying value is remapped.
    Value *NewVal = VMap ? VMap->lookup(E->getValue()) : nullptr;
    if (NewVal) {
      auto *NewE = SE.getSCEV(NewVal);

      // While the mapped value might be different the SCEV representation might
      // not be. To this end we will check before we go into recursion here.
      if (E != NewE)
        return visit(NewE);
    }

    // Choose an insertion point: at the instruction itself when it is already
    // outside the region, otherwise in the RTC block (or the function entry
    // when the instruction belongs to a different function).
    Instruction *Inst = dyn_cast<Instruction>(E->getValue());
    Instruction *IP;
    if (Inst && !R.contains(Inst))
      IP = Inst;
    else if (Inst && RTCBB->getParent() == Inst->getFunction())
      IP = RTCBB->getTerminator();
    else
      IP = RTCBB->getParent()->getEntryBlock().getTerminator();

    if (!Inst || (Inst->getOpcode() != Instruction::SRem &&
                  Inst->getOpcode() != Instruction::SDiv))
      return visitGenericInst(E, Inst, IP);

    const SCEV *LHSScev = SE.getSCEV(Inst->getOperand(0));
    const SCEV *RHSScev = SE.getSCEV(Inst->getOperand(1));

    // Guard against division by zero in the hoisted copy.
    if (!SE.isKnownNonZero(RHSScev))
      RHSScev = SE.getUMaxExpr(RHSScev, SE.getConstant(E->getType(), 1));

    Value *LHS = expandCodeFor(LHSScev, E->getType(), IP);
    Value *RHS = expandCodeFor(RHSScev, E->getType(), IP);

    Inst = BinaryOperator::Create((Instruction::BinaryOps)Inst->getOpcode(),
                                  LHS, RHS, Inst->getName() + Name, IP);
    return SE.getSCEV(Inst);
  }

  /// The following functions will just traverse the SCEV and rebuild it with
  /// the new operands returned by the traversal.
  ///
  ///{
  const SCEV *visitConstant(const SCEVConstant *E) { return E; }
  const SCEV *visitTruncateExpr(const SCEVTruncateExpr *E) {
    return SE.getTruncateExpr(visit(E->getOperand()), E->getType());
  }
  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *E) {
    return SE.getZeroExtendExpr(visit(E->getOperand()), E->getType());
  }
  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *E) {
    return SE.getSignExtendExpr(visit(E->getOperand()), E->getType());
  }
  const SCEV *visitUDivExpr(const SCEVUDivExpr *E) {
    // As above, make the divisor provably non-zero before rebuilding.
    auto *RHSScev = visit(E->getRHS());
    if (!SE.isKnownNonZero(RHSScev))
      RHSScev = SE.getUMaxExpr(RHSScev, SE.getConstant(E->getType(), 1));
    return SE.getUDivExpr(visit(E->getLHS()), RHSScev);
  }
  const SCEV *visitAddExpr(const SCEVAddExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getAddExpr(NewOps);
  }
  const SCEV *visitMulExpr(const SCEVMulExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getMulExpr(NewOps);
  }
  const SCEV *visitUMaxExpr(const SCEVUMaxExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getUMaxExpr(NewOps);
  }
  const SCEV *visitSMaxExpr(const SCEVSMaxExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getSMaxExpr(NewOps);
  }
  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getAddRecExpr(NewOps, E->getLoop(), E->getNoWrapFlags());
  }
  ///}
};
366
367
// Expand a SCEV expression in front of (or inside) the SCoP's region,
// delegating all region-aware fixups to ScopExpander.
Value *polly::expandCodeFor(Scop &S, ScalarEvolution &SE, const DataLayout &DL,
                            const char *Name, const SCEV *E, Type *Ty,
                            Instruction *IP, ValueMapT *VMap,
                            BasicBlock *RTCBB) {
  return ScopExpander(S.getRegion(), SE, DL, Name, VMap, RTCBB)
      .expandCodeFor(E, Ty, IP);
}
374
375
// Heuristically decide whether @p BB is an "error block", i.e. a block whose
// execution is assumed to be a rare event (so Polly may speculate it is not
// executed). Returns false whenever the heuristic cannot rule the block in.
bool polly::isErrorBlock(BasicBlock &BB, const Region &R, LoopInfo &LI,
                         const DominatorTree &DT) {
  // The whole heuristic can be switched off from the command line.
  if (!PollyAllowErrorBlocks)
    return false;

  // Blocks ending in 'unreachable' are error blocks by definition.
  if (isa<UnreachableInst>(BB.getTerminator()))
    return true;

  // Loop headers are executed on every iteration, never treated as errors.
  if (LI.isLoopHeader(&BB))
    return false;

  // Basic blocks that are always executed are not considered error blocks,
  // as their execution can not be a rare event.
  bool DominatesAllPredecessors = true;
  if (R.isTopLevelRegion()) {
    // For the top-level region check dominance of every returning block.
    for (BasicBlock &I : *R.getEntry()->getParent())
      if (isa<ReturnInst>(I.getTerminator()) && !DT.dominates(&BB, &I))
        DominatesAllPredecessors = false;
  } else {
    // Otherwise check the in-region predecessors of the region exit.
    for (auto Pred : predecessors(R.getExit()))
      if (R.contains(Pred) && !DT.dominates(&BB, Pred))
        DominatesAllPredecessors = false;
  }

  if (DominatesAllPredecessors)
    return false;

  // FIXME: This is a simple heuristic to determine if the load is executed
  //        in a conditional. However, we actually would need the control
  //        condition, i.e., the post dominance frontier. Alternatively we
  //        could walk up the dominance tree until we find a block that is
  //        not post dominated by the load and check if it is a conditional
  //        or a loop header.
  auto *DTNode = DT.getNode(&BB);
  if (!DTNode)
    return false;

  DTNode = DTNode->getIDom();

  if (!DTNode)
    return false;

  // Blocks immediately dominated by a loop header are the "normal" loop
  // body; do not treat them as error blocks.
  auto *IDomBB = DTNode->getBlock();
  if (LI.isLoopHeader(IDomBB))
    return false;

  // Finally, a conditionally executed block counts as an error block if it
  // contains a call that may access memory or does not return.
  for (Instruction &Inst : BB)
    if (CallInst *CI = dyn_cast<CallInst>(&Inst)) {
      if (isIgnoredIntrinsic(CI))
        continue;

      // memset, memcpy and memmove are modeled intrinsics.
      if (isa<MemSetInst>(CI) || isa<MemTransferInst>(CI))
        continue;

      if (!CI->doesNotAccessMemory())
        return true;
      if (CI->doesNotReturn())
        return true;
    }

  return false;
}
438
439
32.1k
// Return the branch/switch condition of @p TI. Unconditional branches yield
// the i1 constant 'true'; terminators other than branches and switches yield
// nullptr.
Value *polly::getConditionFromTerminator(TerminatorInst *TI) {
  if (auto *Br = dyn_cast<BranchInst>(TI))
    return Br->isUnconditional()
               ? ConstantInt::getTrue(Type::getInt1Ty(TI->getContext()))
               : Br->getCondition();

  if (auto *Sw = dyn_cast<SwitchInst>(TI))
    return Sw->getCondition();

  return nullptr;
}
452
453
// Decide whether @p LInst can be hoisted in front of the region @p R: its
// pointer must be invariant in every surrounding loop inside R, and no
// always-executed write through the same pointer may exist inside R.
bool polly::isHoistableLoad(LoadInst *LInst, Region &R, LoopInfo &LI,
                            ScalarEvolution &SE, const DominatorTree &DT) {
  Loop *L = LI.getLoopFor(LInst->getParent());
  auto *Ptr = LInst->getPointerOperand();
  const SCEV *PtrSCEV = SE.getSCEVAtScope(Ptr, L);
  // The pointer must not vary in any loop of R containing the load.
  while (L && R.contains(L)) {
    if (!SE.isLoopInvariant(PtrSCEV, L))
      return false;
    L = L->getParentLoop();
  }

  // Inspect all in-region users of the pointer that may write memory.
  for (auto *User : Ptr->users()) {
    auto *UserI = dyn_cast<Instruction>(User);
    if (!UserI || !R.contains(UserI))
      continue;
    if (!UserI->mayWriteToMemory())
      continue;

    auto &BB = *UserI->getParent();
    // A write dominating the load may define the loaded value — not hoistable.
    if (DT.dominates(&BB, LInst->getParent()))
      return false;

    // A write that is executed on every path through the region also blocks
    // hoisting (same always-executed test as in isErrorBlock above).
    bool DominatesAllPredecessors = true;
    if (R.isTopLevelRegion()) {
      for (BasicBlock &I : *R.getEntry()->getParent())
        if (isa<ReturnInst>(I.getTerminator()) && !DT.dominates(&BB, &I))
          DominatesAllPredecessors = false;
    } else {
      for (auto Pred : predecessors(R.getExit()))
        if (R.contains(Pred) && !DT.dominates(&BB, Pred))
          DominatesAllPredecessors = false;
    }

    if (!DominatesAllPredecessors)
      continue;

    return false;
  }

  return true;
}
494
495
16.9k
bool polly::isIgnoredIntrinsic(const Value *V) {
496
16.9k
  if (auto *IT = dyn_cast<IntrinsicInst>(V)) {
497
253
    switch (IT->getIntrinsicID()) {
498
253
    // Lifetime markers are supported/ignored.
499
253
    case llvm::Intrinsic::lifetime_start:
500
128
    case llvm::Intrinsic::lifetime_end:
501
128
    // Invariant markers are supported/ignored.
502
128
    case llvm::Intrinsic::invariant_start:
503
128
    case llvm::Intrinsic::invariant_end:
504
128
    // Some misc annotations are supported/ignored.
505
128
    case llvm::Intrinsic::var_annotation:
506
128
    case llvm::Intrinsic::ptr_annotation:
507
128
    case llvm::Intrinsic::annotation:
508
128
    case llvm::Intrinsic::donothing:
509
128
    case llvm::Intrinsic::assume:
510
128
    // Some debug info intrinsics are supported/ignored.
511
128
    case llvm::Intrinsic::dbg_value:
512
128
    case llvm::Intrinsic::dbg_declare:
513
128
      return true;
514
128
    default:
515
125
      break;
516
16.8k
    }
517
16.8k
  }
518
16.8k
  return false;
519
16.8k
}
520
521
bool polly::canSynthesize(const Value *V, const Scop &S, ScalarEvolution *SE,
522
27.7k
                          Loop *Scope) {
523
27.7k
  if (!V || !SE->isSCEVable(V->getType()))
524
4.33k
    return false;
525
23.4k
526
23.4k
  const InvariantLoadsSetTy &ILS = S.getRequiredInvariantLoads();
527
23.4k
  if (const SCEV *Scev = SE->getSCEVAtScope(const_cast<Value *>(V), Scope))
528
23.4k
    if (!isa<SCEVCouldNotCompute>(Scev))
529
23.4k
      if (!hasScalarDepsInsideRegion(Scev, &S.getRegion(), Scope, false, ILS))
530
15.9k
        return true;
531
7.53k
532
7.53k
  return false;
533
7.53k
}
534
535
18.7k
// Return the basic block in which @p U is effectively used. For PHI operands
// that is the corresponding incoming block, not the PHI's own block; for a
// non-instruction user this returns nullptr.
llvm::BasicBlock *polly::getUseBlock(const llvm::Use &U) {
  auto *UserInst = dyn_cast<Instruction>(U.getUser());
  if (!UserInst)
    return nullptr;

  if (auto *PHI = dyn_cast<PHINode>(UserInst))
    return PHI->getIncomingBlock(U);

  return UserInst->getParent();
}
545
546
// Derive per-dimension subscript expressions and (inner) dimension sizes from
// a GEP instruction. Returns empty vectors when a non-array type is reached
// mid-walk (the access cannot be delinearized this way).
std::tuple<std::vector<const SCEV *>, std::vector<int>>
polly::getIndexExpressionsFromGEP(GetElementPtrInst *GEP, ScalarEvolution &SE) {
  std::vector<const SCEV *> Subscripts;
  std::vector<int> Sizes;

  Type *Ty = GEP->getPointerOperandType();

  // Set when the leading subscript is a constant zero and is dropped.
  bool DroppedFirstDim = false;

  // Operand 0 is the pointer itself; indices start at operand 1.
  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {

    const SCEV *Expr = SE.getSCEV(GEP->getOperand(i));

    if (i == 1) {
      // Step through the pointee (or array element) type first.
      if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
        Ty = PtrTy->getElementType();
      } else if (auto *ArrayTy = dyn_cast<ArrayType>(Ty)) {
        Ty = ArrayTy->getElementType();
      } else {
        Subscripts.clear();
        Sizes.clear();
        break;
      }
      // A constant-zero first index carries no information; drop it.
      if (auto *Const = dyn_cast<SCEVConstant>(Expr))
        if (Const->getValue()->isZero()) {
          DroppedFirstDim = true;
          continue;
        }
      Subscripts.push_back(Expr);
      continue;
    }

    auto *ArrayTy = dyn_cast<ArrayType>(Ty);
    if (!ArrayTy) {
      Subscripts.clear();
      Sizes.clear();
      break;
    }

    Subscripts.push_back(Expr);
    // The outermost dimension size is omitted when the first index was
    // dropped: its extent is unconstrained.
    if (!(DroppedFirstDim && i == 2))
      Sizes.push_back(ArrayTy->getNumElements());

    Ty = ArrayTy->getElementType();
  }

  return std::make_tuple(Subscripts, Sizes);
}
594
595
// Walk up the loop nest starting at @p L until a loop not contained in
// @p BoxedLoops is found; may return nullptr when every ancestor is boxed.
llvm::Loop *polly::getFirstNonBoxedLoopFor(llvm::Loop *L, llvm::LoopInfo &LI,
                                           const BoxedLoopsSetTy &BoxedLoops) {
  Loop *Cur = L;
  while (BoxedLoops.count(Cur) != 0)
    Cur = Cur->getParentLoop();
  return Cur;
}
601
602
// Convenience overload: start from the innermost loop containing @p BB.
llvm::Loop *polly::getFirstNonBoxedLoopFor(llvm::BasicBlock *BB,
                                           llvm::LoopInfo &LI,
                                           const BoxedLoopsSetTy &BoxedLoops) {
  return getFirstNonBoxedLoopFor(LI.getLoopFor(BB), LI, BoxedLoops);
}