// Coverage-report residue (llvm-cov): original path
// /Users/buildslave/jenkins/sharedspace/clang-stage2-coverage-R@2/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
// Columns were: Line | Count | Source (jump to first uncovered line)
1 | | //===- IndVarSimplify.cpp - Induction Variable Elimination ----------------===// |
2 | | // |
3 | | // The LLVM Compiler Infrastructure |
4 | | // |
5 | | // This file is distributed under the University of Illinois Open Source |
6 | | // License. See LICENSE.TXT for details. |
7 | | // |
8 | | //===----------------------------------------------------------------------===// |
9 | | // |
10 | | // This transformation analyzes and transforms the induction variables (and |
11 | | // computations derived from them) into simpler forms suitable for subsequent |
12 | | // analysis and transformation. |
13 | | // |
14 | | // If the trip count of a loop is computable, this pass also makes the following |
15 | | // changes: |
16 | | // 1. The exit condition for the loop is canonicalized to compare the |
17 | | // induction value against the exit value. This turns loops like: |
18 | | // 'for (i = 7; i*i < 1000; ++i)' into 'for (i = 0; i != 25; ++i)' |
19 | | // 2. Any use outside of the loop of an expression derived from the indvar |
20 | | // is changed to compute the derived value outside of the loop, eliminating |
21 | | // the dependence on the exit value of the induction variable. If the only |
22 | | // purpose of the loop is to compute the exit value of some derived |
23 | | // expression, this transformation will make the loop dead. |
24 | | // |
25 | | //===----------------------------------------------------------------------===// |
26 | | |
27 | | #include "llvm/Transforms/Scalar/IndVarSimplify.h" |
28 | | #include "llvm/ADT/SmallVector.h" |
29 | | #include "llvm/ADT/Statistic.h" |
30 | | #include "llvm/Analysis/GlobalsModRef.h" |
31 | | #include "llvm/Analysis/LoopInfo.h" |
32 | | #include "llvm/Analysis/LoopPass.h" |
33 | | #include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h" |
34 | | #include "llvm/Analysis/ScalarEvolutionExpander.h" |
35 | | #include "llvm/Analysis/TargetLibraryInfo.h" |
36 | | #include "llvm/Analysis/TargetTransformInfo.h" |
37 | | #include "llvm/IR/BasicBlock.h" |
38 | | #include "llvm/IR/CFG.h" |
39 | | #include "llvm/IR/Constants.h" |
40 | | #include "llvm/IR/DataLayout.h" |
41 | | #include "llvm/IR/Dominators.h" |
42 | | #include "llvm/IR/Instructions.h" |
43 | | #include "llvm/IR/IntrinsicInst.h" |
44 | | #include "llvm/IR/LLVMContext.h" |
45 | | #include "llvm/IR/PatternMatch.h" |
46 | | #include "llvm/IR/Type.h" |
47 | | #include "llvm/Support/CommandLine.h" |
48 | | #include "llvm/Support/Debug.h" |
49 | | #include "llvm/Support/raw_ostream.h" |
50 | | #include "llvm/Transforms/Scalar.h" |
51 | | #include "llvm/Transforms/Scalar/LoopPassManager.h" |
52 | | #include "llvm/Transforms/Utils/BasicBlockUtils.h" |
53 | | #include "llvm/Transforms/Utils/Local.h" |
54 | | #include "llvm/Transforms/Utils/LoopUtils.h" |
55 | | #include "llvm/Transforms/Utils/SimplifyIndVar.h" |
56 | | using namespace llvm; |
57 | | |
58 | | #define DEBUG_TYPE "indvars" |
59 | | |
60 | | STATISTIC(NumWidened , "Number of indvars widened"); |
61 | | STATISTIC(NumReplaced , "Number of exit values replaced"); |
62 | | STATISTIC(NumLFTR , "Number of loop exit tests replaced"); |
63 | | STATISTIC(NumElimExt , "Number of IV sign/zero extends eliminated"); |
64 | | STATISTIC(NumElimIV , "Number of congruent IVs eliminated"); |
65 | | |
66 | | // Trip count verification can be enabled by default under NDEBUG if we |
67 | | // implement a strong expression equivalence checker in SCEV. Until then, we |
68 | | // use the verify-indvars flag, which may assert in some cases. |
69 | | static cl::opt<bool> VerifyIndvars( |
70 | | "verify-indvars", cl::Hidden, |
71 | | cl::desc("Verify the ScalarEvolution result after running indvars")); |
72 | | |
73 | | enum ReplaceExitVal { NeverRepl, OnlyCheapRepl, AlwaysRepl }; |
74 | | |
75 | | static cl::opt<ReplaceExitVal> ReplaceExitValue( |
76 | | "replexitval", cl::Hidden, cl::init(OnlyCheapRepl), |
77 | | cl::desc("Choose the strategy to replace exit value in IndVarSimplify"), |
78 | | cl::values(clEnumValN(NeverRepl, "never", "never replace exit value"), |
79 | | clEnumValN(OnlyCheapRepl, "cheap", |
80 | | "only replace exit value when the cost is cheap"), |
81 | | clEnumValN(AlwaysRepl, "always", |
82 | | "always replace exit value whenever possible"))); |
83 | | |
84 | | static cl::opt<bool> UsePostIncrementRanges( |
85 | | "indvars-post-increment-ranges", cl::Hidden, |
86 | | cl::desc("Use post increment control-dependent ranges in IndVarSimplify"), |
87 | | cl::init(true)); |
88 | | |
89 | | static cl::opt<bool> |
90 | | DisableLFTR("disable-lftr", cl::Hidden, cl::init(false), |
91 | | cl::desc("Disable Linear Function Test Replace optimization")); |
92 | | |
93 | | namespace { |
94 | | struct RewritePhi; |
95 | | |
96 | | class IndVarSimplify { |
97 | | LoopInfo *LI; |
98 | | ScalarEvolution *SE; |
99 | | DominatorTree *DT; |
100 | | const DataLayout &DL; |
101 | | TargetLibraryInfo *TLI; |
102 | | const TargetTransformInfo *TTI; |
103 | | |
104 | | SmallVector<WeakTrackingVH, 16> DeadInsts; |
105 | | bool Changed = false; |
106 | | |
107 | | bool isValidRewrite(Value *FromVal, Value *ToVal); |
108 | | |
109 | | void handleFloatingPointIV(Loop *L, PHINode *PH); |
110 | | void rewriteNonIntegerIVs(Loop *L); |
111 | | |
112 | | void simplifyAndExtend(Loop *L, SCEVExpander &Rewriter, LoopInfo *LI); |
113 | | |
114 | | bool canLoopBeDeleted(Loop *L, SmallVector<RewritePhi, 8> &RewritePhiSet); |
115 | | void rewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter); |
116 | | void rewriteFirstIterationLoopExitValues(Loop *L); |
117 | | |
118 | | Value *linearFunctionTestReplace(Loop *L, const SCEV *BackedgeTakenCount, |
119 | | PHINode *IndVar, SCEVExpander &Rewriter); |
120 | | |
121 | | void sinkUnusedInvariants(Loop *L); |
122 | | |
123 | | Value *expandSCEVIfNeeded(SCEVExpander &Rewriter, const SCEV *S, Loop *L, |
124 | | Instruction *InsertPt, Type *Ty); |
125 | | |
126 | | public: |
127 | | IndVarSimplify(LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, |
128 | | const DataLayout &DL, TargetLibraryInfo *TLI, |
129 | | TargetTransformInfo *TTI) |
130 | 364k | : LI(LI), SE(SE), DT(DT), DL(DL), TLI(TLI), TTI(TTI) {} |
131 | | |
132 | | bool run(Loop *L); |
133 | | }; |
134 | | } |
135 | | |
136 | | /// Return true if the SCEV expansion generated by the rewriter can replace the |
137 | | /// original value. SCEV guarantees that it produces the same value, but the way |
138 | | /// it is produced may be illegal IR. Ideally, this function will only be |
139 | | /// called for verification. |
140 | 4.07k | bool IndVarSimplify::isValidRewrite(Value *FromVal, Value *ToVal) { |
141 | 4.07k | // If an SCEV expression subsumed multiple pointers, its expansion could |
142 | 4.07k | // reassociate the GEP changing the base pointer. This is illegal because the |
143 | 4.07k | // final address produced by a GEP chain must be inbounds relative to its |
144 | 4.07k | // underlying object. Otherwise basic alias analysis, among other things, |
145 | 4.07k | // could fail in a dangerous way. Ultimately, SCEV will be improved to avoid |
146 | 4.07k | // producing an expression involving multiple pointers. Until then, we must |
147 | 4.07k | // bail out here. |
148 | 4.07k | // |
149 | 4.07k | // Retrieve the pointer operand of the GEP. Don't use GetUnderlyingObject |
150 | 4.07k | // because it understands lcssa phis while SCEV does not. |
151 | 4.07k | Value *FromPtr = FromVal; |
152 | 4.07k | Value *ToPtr = ToVal; |
153 | 4.07k | if (auto *GEP4.07k = dyn_cast<GEPOperator>(FromVal)) { |
154 | 924 | FromPtr = GEP->getPointerOperand(); |
155 | 924 | } |
156 | 4.07k | if (auto *GEP4.07k = dyn_cast<GEPOperator>(ToVal)) { |
157 | 909 | ToPtr = GEP->getPointerOperand(); |
158 | 909 | } |
159 | 4.07k | if (FromPtr != FromVal || 4.07k ToPtr != ToVal3.14k ) { |
160 | 976 | // Quickly check the common case |
161 | 976 | if (FromPtr == ToPtr) |
162 | 18 | return true; |
163 | 958 | |
164 | 958 | // SCEV may have rewritten an expression that produces the GEP's pointer |
165 | 958 | // operand. That's ok as long as the pointer operand has the same base |
166 | 958 | // pointer. Unlike GetUnderlyingObject(), getPointerBase() will find the |
167 | 958 | // base of a recurrence. This handles the case in which SCEV expansion |
168 | 958 | // converts a pointer type recurrence into a nonrecurrent pointer base |
169 | 958 | // indexed by an integer recurrence. |
170 | 958 | |
171 | 958 | // If the GEP base pointer is a vector of pointers, abort. |
172 | 958 | if (958 !FromPtr->getType()->isPointerTy() || 958 !ToPtr->getType()->isPointerTy()958 ) |
173 | 0 | return false; |
174 | 958 | |
175 | 958 | const SCEV *FromBase = SE->getPointerBase(SE->getSCEV(FromPtr)); |
176 | 958 | const SCEV *ToBase = SE->getPointerBase(SE->getSCEV(ToPtr)); |
177 | 958 | if (FromBase == ToBase) |
178 | 938 | return true; |
179 | 20 | |
180 | 20 | DEBUG20 (dbgs() << "INDVARS: GEP rewrite bail out " |
181 | 20 | << *FromBase << " != " << *ToBase << "\n"); |
182 | 20 | |
183 | 20 | return false; |
184 | 20 | } |
185 | 3.09k | return true; |
186 | 3.09k | } |
187 | | |
188 | | /// Determine the insertion point for this user. By default, insert immediately |
189 | | /// before the user. SCEVExpander or LICM will hoist loop invariants out of the |
190 | | /// loop. For PHI nodes, there may be multiple uses, so compute the nearest |
191 | | /// common dominator for the incoming blocks. |
192 | | static Instruction *getInsertPointForUses(Instruction *User, Value *Def, |
193 | 43.5k | DominatorTree *DT, LoopInfo *LI) { |
194 | 43.5k | PHINode *PHI = dyn_cast<PHINode>(User); |
195 | 43.5k | if (!PHI) |
196 | 42.3k | return User; |
197 | 1.13k | |
198 | 1.13k | Instruction *InsertPt = nullptr; |
199 | 3.65k | for (unsigned i = 0, e = PHI->getNumIncomingValues(); i != e3.65k ; ++i2.52k ) { |
200 | 2.52k | if (PHI->getIncomingValue(i) != Def) |
201 | 1.13k | continue; |
202 | 1.38k | |
203 | 1.38k | BasicBlock *InsertBB = PHI->getIncomingBlock(i); |
204 | 1.38k | if (!InsertPt1.38k ) { |
205 | 1.13k | InsertPt = InsertBB->getTerminator(); |
206 | 1.13k | continue; |
207 | 1.13k | } |
208 | 256 | InsertBB = DT->findNearestCommonDominator(InsertPt->getParent(), InsertBB); |
209 | 256 | InsertPt = InsertBB->getTerminator(); |
210 | 256 | } |
211 | 1.13k | assert(InsertPt && "Missing phi operand"); |
212 | 1.13k | |
213 | 1.13k | auto *DefI = dyn_cast<Instruction>(Def); |
214 | 1.13k | if (!DefI) |
215 | 0 | return InsertPt; |
216 | 1.13k | |
217 | 1.13k | assert(DT->dominates(DefI, InsertPt) && "def does not dominate all uses"); |
218 | 1.13k | |
219 | 1.13k | auto *L = LI->getLoopFor(DefI->getParent()); |
220 | 1.13k | assert(!L || L->contains(LI->getLoopFor(InsertPt->getParent()))); |
221 | 1.13k | |
222 | 1.24k | for (auto *DTN = (*DT)[InsertPt->getParent()]; DTN1.24k ; DTN = DTN->getIDom()113 ) |
223 | 1.24k | if (1.24k LI->getLoopFor(DTN->getBlock()) == L1.24k ) |
224 | 1.13k | return DTN->getBlock()->getTerminator(); |
225 | 1.13k | |
226 | 0 | llvm_unreachable0 ("DefI dominates InsertPt!"); |
227 | 0 | } |
228 | | |
229 | | //===----------------------------------------------------------------------===// |
230 | | // rewriteNonIntegerIVs and helpers. Prefer integer IVs. |
231 | | //===----------------------------------------------------------------------===// |
232 | | |
233 | | /// Convert APF to an integer, if possible. |
234 | 5.09k | static bool ConvertToSInt(const APFloat &APF, int64_t &IntVal) { |
235 | 5.09k | bool isExact = false; |
236 | 5.09k | // See if we can convert this to an int64_t |
237 | 5.09k | uint64_t UIntVal; |
238 | 5.09k | if (APF.convertToInteger(makeMutableArrayRef(UIntVal), 64, true, |
239 | 5.09k | APFloat::rmTowardZero, &isExact) != APFloat::opOK || |
240 | 4.96k | !isExact) |
241 | 130 | return false; |
242 | 4.96k | IntVal = UIntVal; |
243 | 4.96k | return true; |
244 | 4.96k | } |
245 | | |
246 | | /// If the loop has floating induction variable then insert corresponding |
247 | | /// integer induction variable if possible. |
248 | | /// For example, |
249 | | /// for(double i = 0; i < 10000; ++i) |
250 | | /// bar(i) |
251 | | /// is converted into |
252 | | /// for(int i = 0; i < 10000; ++i) |
253 | | /// bar((double)i); |
254 | | /// |
255 | 487k | void IndVarSimplify::handleFloatingPointIV(Loop *L, PHINode *PN) { |
256 | 487k | unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0)); |
257 | 487k | unsigned BackEdge = IncomingEdge^1; |
258 | 487k | |
259 | 487k | // Check incoming value. |
260 | 487k | auto *InitValueVal = dyn_cast<ConstantFP>(PN->getIncomingValue(IncomingEdge)); |
261 | 487k | |
262 | 487k | int64_t InitValue; |
263 | 487k | if (!InitValueVal || 487k !ConvertToSInt(InitValueVal->getValueAPF(), InitValue)4.99k ) |
264 | 482k | return; |
265 | 4.88k | |
266 | 4.88k | // Check IV increment. Reject this PN if increment operation is not |
267 | 4.88k | // an add or increment value can not be represented by an integer. |
268 | 4.88k | auto *Incr = dyn_cast<BinaryOperator>(PN->getIncomingValue(BackEdge)); |
269 | 4.88k | if (Incr == nullptr || 4.88k Incr->getOpcode() != Instruction::FAdd2.19k ) return2.83k ; |
270 | 2.05k | |
271 | 2.05k | // If this is not an add of the PHI with a constantfp, or if the constant fp |
272 | 2.05k | // is not an integer, bail out. |
273 | 2.05k | ConstantFP *IncValueVal = dyn_cast<ConstantFP>(Incr->getOperand(1)); |
274 | 2.05k | int64_t IncValue; |
275 | 2.05k | if (IncValueVal == nullptr || 2.05k Incr->getOperand(0) != PN92 || |
276 | 88 | !ConvertToSInt(IncValueVal->getValueAPF(), IncValue)) |
277 | 1.99k | return; |
278 | 62 | |
279 | 62 | // Check Incr uses. One user is PN and the other user is an exit condition |
280 | 62 | // used by the conditional terminator. |
281 | 62 | Value::user_iterator IncrUse = Incr->user_begin(); |
282 | 62 | Instruction *U1 = cast<Instruction>(*IncrUse++); |
283 | 62 | if (IncrUse == Incr->user_end()62 ) return4 ; |
284 | 58 | Instruction *U2 = cast<Instruction>(*IncrUse++); |
285 | 58 | if (IncrUse != Incr->user_end()58 ) return8 ; |
286 | 50 | |
287 | 50 | // Find exit condition, which is an fcmp. If it doesn't exist, or if it isn't |
288 | 50 | // only used by a branch, we can't transform it. |
289 | 50 | FCmpInst *Compare = dyn_cast<FCmpInst>(U1); |
290 | 50 | if (!Compare) |
291 | 39 | Compare = dyn_cast<FCmpInst>(U2); |
292 | 50 | if (!Compare || 50 !Compare->hasOneUse()13 || |
293 | 12 | !isa<BranchInst>(Compare->user_back())) |
294 | 38 | return; |
295 | 12 | |
296 | 12 | BranchInst *TheBr = cast<BranchInst>(Compare->user_back()); |
297 | 12 | |
298 | 12 | // We need to verify that the branch actually controls the iteration count |
299 | 12 | // of the loop. If not, the new IV can overflow and no one will notice. |
300 | 12 | // The branch block must be in the loop and one of the successors must be out |
301 | 12 | // of the loop. |
302 | 12 | assert(TheBr->isConditional() && "Can't use fcmp if not conditional"); |
303 | 12 | if (!L->contains(TheBr->getParent()) || |
304 | 12 | (L->contains(TheBr->getSuccessor(0)) && |
305 | 7 | L->contains(TheBr->getSuccessor(1)))) |
306 | 0 | return; |
307 | 12 | |
308 | 12 | |
309 | 12 | // If it isn't a comparison with an integer-as-fp (the exit value), we can't |
310 | 12 | // transform it. |
311 | 12 | ConstantFP *ExitValueVal = dyn_cast<ConstantFP>(Compare->getOperand(1)); |
312 | 12 | int64_t ExitValue; |
313 | 12 | if (ExitValueVal == nullptr || |
314 | 12 | !ConvertToSInt(ExitValueVal->getValueAPF(), ExitValue)) |
315 | 0 | return; |
316 | 12 | |
317 | 12 | // Find new predicate for integer comparison. |
318 | 12 | CmpInst::Predicate NewPred = CmpInst::BAD_ICMP_PREDICATE; |
319 | 12 | switch (Compare->getPredicate()) { |
320 | 0 | default: return; // Unknown comparison. |
321 | 0 | case CmpInst::FCMP_OEQ: |
322 | 0 | case CmpInst::FCMP_UEQ: NewPred = CmpInst::ICMP_EQ; break; |
323 | 0 | case CmpInst::FCMP_ONE: |
324 | 0 | case CmpInst::FCMP_UNE: NewPred = CmpInst::ICMP_NE; break; |
325 | 4 | case CmpInst::FCMP_OGT: |
326 | 4 | case CmpInst::FCMP_UGT: NewPred = CmpInst::ICMP_SGT; break; |
327 | 0 | case CmpInst::FCMP_OGE: |
328 | 0 | case CmpInst::FCMP_UGE: NewPred = CmpInst::ICMP_SGE; break; |
329 | 8 | case CmpInst::FCMP_OLT: |
330 | 8 | case CmpInst::FCMP_ULT: NewPred = CmpInst::ICMP_SLT; break; |
331 | 0 | case CmpInst::FCMP_OLE: |
332 | 0 | case CmpInst::FCMP_ULE: NewPred = CmpInst::ICMP_SLE; break; |
333 | 12 | } |
334 | 12 | |
335 | 12 | // We convert the floating point induction variable to a signed i32 value if |
336 | 12 | // we can. This is only safe if the comparison will not overflow in a way |
337 | 12 | // that won't be trapped by the integer equivalent operations. Check for this |
338 | 12 | // now. |
339 | 12 | // TODO: We could use i64 if it is native and the range requires it. |
340 | 12 | |
341 | 12 | // The start/stride/exit values must all fit in signed i32. |
342 | 12 | if (12 !isInt<32>(InitValue) || 12 !isInt<32>(IncValue)12 || !isInt<32>(ExitValue)12 ) |
343 | 0 | return; |
344 | 12 | |
345 | 12 | // If not actually striding (add x, 0.0), avoid touching the code. |
346 | 12 | if (12 IncValue == 012 ) |
347 | 0 | return; |
348 | 12 | |
349 | 12 | // Positive and negative strides have different safety conditions. |
350 | 12 | if (12 IncValue > 012 ) { |
351 | 10 | // If we have a positive stride, we require the init to be less than the |
352 | 10 | // exit value. |
353 | 10 | if (InitValue >= ExitValue) |
354 | 1 | return; |
355 | 9 | |
356 | 9 | uint32_t Range = uint32_t(ExitValue-InitValue); |
357 | 9 | // Check for infinite loop, either: |
358 | 9 | // while (i <= Exit) or until (i > Exit) |
359 | 9 | if (NewPred == CmpInst::ICMP_SLE || 9 NewPred == CmpInst::ICMP_SGT9 ) { |
360 | 4 | if (++Range == 04 ) return0 ; // Range overflows. |
361 | 9 | } |
362 | 9 | |
363 | 9 | unsigned Leftover = Range % uint32_t(IncValue); |
364 | 9 | |
365 | 9 | // If this is an equality comparison, we require that the strided value |
366 | 9 | // exactly land on the exit value, otherwise the IV condition will wrap |
367 | 9 | // around and do things the fp IV wouldn't. |
368 | 9 | if ((NewPred == CmpInst::ICMP_EQ || 9 NewPred == CmpInst::ICMP_NE9 ) && |
369 | 0 | Leftover != 0) |
370 | 0 | return; |
371 | 9 | |
372 | 9 | // If the stride would wrap around the i32 before exiting, we can't |
373 | 9 | // transform the IV. |
374 | 9 | if (9 Leftover != 0 && 9 int32_t(ExitValue+IncValue) < ExitValue1 ) |
375 | 0 | return; |
376 | 12 | |
377 | 2 | } else { |
378 | 2 | // If we have a negative stride, we require the init to be greater than the |
379 | 2 | // exit value. |
380 | 2 | if (InitValue <= ExitValue) |
381 | 0 | return; |
382 | 2 | |
383 | 2 | uint32_t Range = uint32_t(InitValue-ExitValue); |
384 | 2 | // Check for infinite loop, either: |
385 | 2 | // while (i >= Exit) or until (i < Exit) |
386 | 2 | if (NewPred == CmpInst::ICMP_SGE || 2 NewPred == CmpInst::ICMP_SLT2 ) { |
387 | 2 | if (++Range == 02 ) return0 ; // Range overflows. |
388 | 2 | } |
389 | 2 | |
390 | 2 | unsigned Leftover = Range % uint32_t(-IncValue); |
391 | 2 | |
392 | 2 | // If this is an equality comparison, we require that the strided value |
393 | 2 | // exactly land on the exit value, otherwise the IV condition will wrap |
394 | 2 | // around and do things the fp IV wouldn't. |
395 | 2 | if ((NewPred == CmpInst::ICMP_EQ || 2 NewPred == CmpInst::ICMP_NE2 ) && |
396 | 0 | Leftover != 0) |
397 | 0 | return; |
398 | 2 | |
399 | 2 | // If the stride would wrap around the i32 before exiting, we can't |
400 | 2 | // transform the IV. |
401 | 2 | if (2 Leftover != 0 && 2 int32_t(ExitValue+IncValue) > ExitValue0 ) |
402 | 0 | return; |
403 | 11 | } |
404 | 11 | |
405 | 11 | IntegerType *Int32Ty = Type::getInt32Ty(PN->getContext()); |
406 | 11 | |
407 | 11 | // Insert new integer induction variable. |
408 | 11 | PHINode *NewPHI = PHINode::Create(Int32Ty, 2, PN->getName()+".int", PN); |
409 | 11 | NewPHI->addIncoming(ConstantInt::get(Int32Ty, InitValue), |
410 | 11 | PN->getIncomingBlock(IncomingEdge)); |
411 | 11 | |
412 | 11 | Value *NewAdd = |
413 | 11 | BinaryOperator::CreateAdd(NewPHI, ConstantInt::get(Int32Ty, IncValue), |
414 | 11 | Incr->getName()+".int", Incr); |
415 | 11 | NewPHI->addIncoming(NewAdd, PN->getIncomingBlock(BackEdge)); |
416 | 11 | |
417 | 11 | ICmpInst *NewCompare = new ICmpInst(TheBr, NewPred, NewAdd, |
418 | 11 | ConstantInt::get(Int32Ty, ExitValue), |
419 | 11 | Compare->getName()); |
420 | 11 | |
421 | 11 | // In the following deletions, PN may become dead and may be deleted. |
422 | 11 | // Use a WeakTrackingVH to observe whether this happens. |
423 | 11 | WeakTrackingVH WeakPH = PN; |
424 | 11 | |
425 | 11 | // Delete the old floating point exit comparison. The branch starts using the |
426 | 11 | // new comparison. |
427 | 11 | NewCompare->takeName(Compare); |
428 | 11 | Compare->replaceAllUsesWith(NewCompare); |
429 | 11 | RecursivelyDeleteTriviallyDeadInstructions(Compare, TLI); |
430 | 11 | |
431 | 11 | // Delete the old floating point increment. |
432 | 11 | Incr->replaceAllUsesWith(UndefValue::get(Incr->getType())); |
433 | 11 | RecursivelyDeleteTriviallyDeadInstructions(Incr, TLI); |
434 | 11 | |
435 | 11 | // If the FP induction variable still has uses, this is because something else |
436 | 11 | // in the loop uses its value. In order to canonicalize the induction |
437 | 11 | // variable, we chose to eliminate the IV and rewrite it in terms of an |
438 | 11 | // int->fp cast. |
439 | 11 | // |
440 | 11 | // We give preference to sitofp over uitofp because it is faster on most |
441 | 11 | // platforms. |
442 | 11 | if (WeakPH11 ) { |
443 | 9 | Value *Conv = new SIToFPInst(NewPHI, PN->getType(), "indvar.conv", |
444 | 9 | &*PN->getParent()->getFirstInsertionPt()); |
445 | 9 | PN->replaceAllUsesWith(Conv); |
446 | 9 | RecursivelyDeleteTriviallyDeadInstructions(PN, TLI); |
447 | 9 | } |
448 | 487k | Changed = true; |
449 | 487k | } |
450 | | |
451 | 364k | void IndVarSimplify::rewriteNonIntegerIVs(Loop *L) { |
452 | 364k | // First step. Check to see if there are any floating-point recurrences. |
453 | 364k | // If there are, change them into integer recurrences, permitting analysis by |
454 | 364k | // the SCEV routines. |
455 | 364k | // |
456 | 364k | BasicBlock *Header = L->getHeader(); |
457 | 364k | |
458 | 364k | SmallVector<WeakTrackingVH, 8> PHIs; |
459 | 364k | for (BasicBlock::iterator I = Header->begin(); |
460 | 851k | PHINode *PN851k = dyn_cast<PHINode>(I); ++I487k ) |
461 | 487k | PHIs.push_back(PN); |
462 | 364k | |
463 | 851k | for (unsigned i = 0, e = PHIs.size(); i != e851k ; ++i487k ) |
464 | 487k | if (PHINode *487k PN487k = dyn_cast_or_null<PHINode>(&*PHIs[i])) |
465 | 487k | handleFloatingPointIV(L, PN); |
466 | 364k | |
467 | 364k | // If the loop previously had floating-point IV, ScalarEvolution |
468 | 364k | // may not have been able to compute a trip count. Now that we've done some |
469 | 364k | // re-writing, the trip count may be computable. |
470 | 364k | if (Changed) |
471 | 11 | SE->forgetLoop(L); |
472 | 364k | } |
473 | | |
474 | | namespace { |
475 | | // Collect information about PHI nodes which can be transformed in |
476 | | // rewriteLoopExitValues. |
477 | | struct RewritePhi { |
478 | | PHINode *PN; |
479 | | unsigned Ith; // Ith incoming value. |
480 | | Value *Val; // Exit value after expansion. |
481 | | bool HighCost; // High Cost when expansion. |
482 | | |
483 | | RewritePhi(PHINode *P, unsigned I, Value *V, bool H) |
484 | 4.05k | : PN(P), Ith(I), Val(V), HighCost(H) {} |
485 | | }; |
486 | | } |
487 | | |
488 | | Value *IndVarSimplify::expandSCEVIfNeeded(SCEVExpander &Rewriter, const SCEV *S, |
489 | | Loop *L, Instruction *InsertPt, |
490 | 4.07k | Type *ResultTy) { |
491 | 4.07k | // Before expanding S into an expensive LLVM expression, see if we can use an |
492 | 4.07k | // already existing value as the expansion for S. |
493 | 4.07k | if (Value *ExistingValue = Rewriter.getExactExistingExpansion(S, InsertPt, L)) |
494 | 128 | if (128 ExistingValue->getType() == ResultTy128 ) |
495 | 125 | return ExistingValue; |
496 | 3.94k | |
497 | 3.94k | // We didn't find anything, fall back to using SCEVExpander. |
498 | 3.94k | return Rewriter.expandCodeFor(S, ResultTy, InsertPt); |
499 | 3.94k | } |
500 | | |
501 | | //===----------------------------------------------------------------------===// |
502 | | // rewriteLoopExitValues - Optimize IV users outside the loop. |
503 | | // As a side effect, reduces the amount of IV processing within the loop. |
504 | | //===----------------------------------------------------------------------===// |
505 | | |
506 | | /// Check to see if this loop has a computable loop-invariant execution count. |
507 | | /// If so, this means that we can compute the final value of any expressions |
508 | | /// that are recurrent in the loop, and substitute the exit values from the loop |
509 | | /// into any instructions outside of the loop that use the final values of the |
510 | | /// current expressions. |
511 | | /// |
512 | | /// This is mostly redundant with the regular IndVarSimplify activities that |
513 | | /// happen later, except that it's more powerful in some cases, because it's |
514 | | /// able to brute-force evaluate arbitrary instructions as long as they have |
515 | | /// constant operands at the beginning of the loop. |
516 | 141k | void IndVarSimplify::rewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter) { |
517 | 141k | // Check a pre-condition. |
518 | 141k | assert(L->isRecursivelyLCSSAForm(*DT, *LI) && |
519 | 141k | "Indvars did not preserve LCSSA!"); |
520 | 141k | |
521 | 141k | SmallVector<BasicBlock*, 8> ExitBlocks; |
522 | 141k | L->getUniqueExitBlocks(ExitBlocks); |
523 | 141k | |
524 | 141k | SmallVector<RewritePhi, 8> RewritePhiSet; |
525 | 141k | // Find all values that are computed inside the loop, but used outside of it. |
526 | 141k | // Because of LCSSA, these values will only occur in LCSSA PHI Nodes. Scan |
527 | 141k | // the exit blocks of the loop to find them. |
528 | 141k | for (BasicBlock *ExitBB : ExitBlocks) { |
529 | 141k | // If there are no PHI nodes in this exit block, then no values defined |
530 | 141k | // inside the loop are used on this path, skip it. |
531 | 141k | PHINode *PN = dyn_cast<PHINode>(ExitBB->begin()); |
532 | 141k | if (!PN141k ) continue118k ; |
533 | 23.1k | |
534 | 23.1k | unsigned NumPreds = PN->getNumIncomingValues(); |
535 | 23.1k | |
536 | 23.1k | // Iterate over all of the PHI nodes. |
537 | 23.1k | BasicBlock::iterator BBI = ExitBB->begin(); |
538 | 54.3k | while ((PN = dyn_cast<PHINode>(BBI++))54.3k ) { |
539 | 31.2k | if (PN->use_empty()) |
540 | 1.31k | continue; // dead use, don't replace it |
541 | 29.9k | |
542 | 29.9k | if (29.9k !SE->isSCEVable(PN->getType())29.9k ) |
543 | 5.80k | continue; |
544 | 24.1k | |
545 | 24.1k | // It's necessary to tell ScalarEvolution about this explicitly so that |
546 | 24.1k | // it can walk the def-use list and forget all SCEVs, as it may not be |
547 | 24.1k | // watching the PHI itself. Once the new exit value is in place, there |
548 | 24.1k | // may not be a def-use connection between the loop and every instruction |
549 | 24.1k | // which got a SCEVAddRecExpr for that loop. |
550 | 24.1k | SE->forgetValue(PN); |
551 | 24.1k | |
552 | 24.1k | // Iterate over all of the values in all the PHI nodes. |
553 | 48.2k | for (unsigned i = 0; i != NumPreds48.2k ; ++i24.1k ) { |
554 | 24.1k | // If the value being merged in is not integer or is not defined |
555 | 24.1k | // in the loop, skip it. |
556 | 24.1k | Value *InVal = PN->getIncomingValue(i); |
557 | 24.1k | if (!isa<Instruction>(InVal)) |
558 | 16 | continue; |
559 | 24.1k | |
560 | 24.1k | // If this pred is for a subloop, not L itself, skip it. |
561 | 24.1k | if (24.1k LI->getLoopFor(PN->getIncomingBlock(i)) != L24.1k ) |
562 | 0 | continue; // The Block is in a subloop, skip it. |
563 | 24.1k | |
564 | 24.1k | // Check that InVal is defined in the loop. |
565 | 24.1k | Instruction *Inst = cast<Instruction>(InVal); |
566 | 24.1k | if (!L->contains(Inst)) |
567 | 1 | continue; |
568 | 24.1k | |
569 | 24.1k | // Okay, this instruction has a user outside of the current loop |
570 | 24.1k | // and varies predictably *inside* the loop. Evaluate the value it |
571 | 24.1k | // contains when the loop exits, if possible. |
572 | 24.1k | const SCEV *ExitValue = SE->getSCEVAtScope(Inst, L->getParentLoop()); |
573 | 24.1k | if (!SE->isLoopInvariant(ExitValue, L) || |
574 | 4.08k | !isSafeToExpand(ExitValue, *SE)) |
575 | 20.0k | continue; |
576 | 4.07k | |
577 | 4.07k | // Computing the value outside of the loop brings no benefit if : |
578 | 4.07k | // - it is definitely used inside the loop in a way which can not be |
579 | 4.07k | // optimized away. |
580 | 4.07k | // - no use outside of the loop can take advantage of hoisting the |
581 | 4.07k | // computation out of the loop |
582 | 4.07k | if (4.07k ExitValue->getSCEVType()>=scMulExpr4.07k ) { |
583 | 170 | unsigned NumHardInternalUses = 0; |
584 | 170 | unsigned NumSoftExternalUses = 0; |
585 | 170 | unsigned NumUses = 0; |
586 | 170 | for (auto IB = Inst->user_begin(), IE = Inst->user_end(); |
587 | 655 | IB != IE && 655 NumUses <= 6485 ; ++IB485 ) { |
588 | 485 | Instruction *UseInstr = cast<Instruction>(*IB); |
589 | 485 | unsigned Opc = UseInstr->getOpcode(); |
590 | 485 | NumUses++; |
591 | 485 | if (L->contains(UseInstr)485 ) { |
592 | 311 | if (Opc == Instruction::Call || 311 Opc == Instruction::Ret301 ) |
593 | 10 | NumHardInternalUses++; |
594 | 485 | } else { |
595 | 174 | if (Opc == Instruction::PHI174 ) { |
596 | 174 | // Do not count the Phi as a use. LCSSA may have inserted |
597 | 174 | // plenty of trivial ones. |
598 | 174 | NumUses--; |
599 | 174 | for (auto PB = UseInstr->user_begin(), |
600 | 174 | PE = UseInstr->user_end(); |
601 | 354 | PB != PE && 354 NumUses <= 6180 ; ++PB, ++NumUses180 ) { |
602 | 180 | unsigned PhiOpc = cast<Instruction>(*PB)->getOpcode(); |
603 | 180 | if (PhiOpc != Instruction::Call && 180 PhiOpc != Instruction::Ret179 ) |
604 | 169 | NumSoftExternalUses++; |
605 | 180 | } |
606 | 174 | continue; |
607 | 174 | } |
608 | 0 | if (0 Opc != Instruction::Call && 0 Opc != Instruction::Ret0 ) |
609 | 0 | NumSoftExternalUses++; |
610 | 174 | } |
611 | 485 | } |
612 | 170 | if (NumUses <= 6 && 170 NumHardInternalUses170 && !NumSoftExternalUses6 ) |
613 | 2 | continue; |
614 | 4.07k | } |
615 | 4.07k | |
616 | 4.07k | bool HighCost = Rewriter.isHighCostExpansion(ExitValue, L, Inst); |
617 | 4.07k | Value *ExitVal = |
618 | 4.07k | expandSCEVIfNeeded(Rewriter, ExitValue, L, Inst, PN->getType()); |
619 | 4.07k | |
620 | 4.07k | DEBUG(dbgs() << "INDVARS: RLEV: AfterLoopVal = " << *ExitVal << '\n' |
621 | 4.07k | << " LoopVal = " << *Inst << "\n"); |
622 | 4.07k | |
623 | 4.07k | if (!isValidRewrite(Inst, ExitVal)4.07k ) { |
624 | 20 | DeadInsts.push_back(ExitVal); |
625 | 20 | continue; |
626 | 20 | } |
627 | 4.05k | |
628 | 4.05k | // Collect all the candidate PHINodes to be rewritten. |
629 | 4.05k | RewritePhiSet.emplace_back(PN, i, ExitVal, HighCost); |
630 | 4.05k | } |
631 | 31.2k | } |
632 | 141k | } |
633 | 141k | |
634 | 141k | bool LoopCanBeDel = canLoopBeDeleted(L, RewritePhiSet); |
635 | 141k | |
636 | 141k | // Transformation. |
637 | 4.05k | for (const RewritePhi &Phi : RewritePhiSet) { |
638 | 4.05k | PHINode *PN = Phi.PN; |
639 | 4.05k | Value *ExitVal = Phi.Val; |
640 | 4.05k | |
641 | 4.05k | // Only do the rewrite when the ExitValue can be expanded cheaply. |
642 | 4.05k | // If LoopCanBeDel is true, rewrite exit value aggressively. |
643 | 4.05k | if (ReplaceExitValue == OnlyCheapRepl && 4.05k !LoopCanBeDel4.04k && Phi.HighCost3.30k ) { |
644 | 877 | DeadInsts.push_back(ExitVal); |
645 | 877 | continue; |
646 | 877 | } |
647 | 3.17k | |
648 | 3.17k | Changed = true; |
649 | 3.17k | ++NumReplaced; |
650 | 3.17k | Instruction *Inst = cast<Instruction>(PN->getIncomingValue(Phi.Ith)); |
651 | 3.17k | PN->setIncomingValue(Phi.Ith, ExitVal); |
652 | 3.17k | |
653 | 3.17k | // If this instruction is dead now, delete it. Don't do it now to avoid |
654 | 3.17k | // invalidating iterators. |
655 | 3.17k | if (isInstructionTriviallyDead(Inst, TLI)) |
656 | 15 | DeadInsts.push_back(Inst); |
657 | 3.17k | |
658 | 3.17k | // Replace PN with ExitVal if that is legal and does not break LCSSA. |
659 | 3.17k | if (PN->getNumIncomingValues() == 1 && |
660 | 3.17k | LI->replacementPreservesLCSSAForm(PN, ExitVal)3.17k ) { |
661 | 3.16k | PN->replaceAllUsesWith(ExitVal); |
662 | 3.16k | PN->eraseFromParent(); |
663 | 3.16k | } |
664 | 4.05k | } |
665 | 141k | |
666 | 141k | // The insertion point instruction may have been deleted; clear it out |
667 | 141k | // so that the rewriter doesn't trip over it later. |
668 | 141k | Rewriter.clearInsertPoint(); |
669 | 141k | } |
670 | | |
//===---------------------------------------------------------------------===//
// rewriteFirstIterationLoopExitValues: Rewrite loop exit values if we know
// they will exit at the first iteration.
//===---------------------------------------------------------------------===//

676 | | /// Check to see if this loop has loop invariant conditions which lead to loop |
677 | | /// exits. If so, we know that if the exit path is taken, it is at the first |
678 | | /// loop iteration. This lets us predict exit values of PHI nodes that live in |
679 | | /// loop header. |
680 | 364k | void IndVarSimplify::rewriteFirstIterationLoopExitValues(Loop *L) { |
681 | 364k | // Verify the input to the pass is already in LCSSA form. |
682 | 364k | assert(L->isLCSSAForm(*DT)); |
683 | 364k | |
684 | 364k | SmallVector<BasicBlock *, 8> ExitBlocks; |
685 | 364k | L->getUniqueExitBlocks(ExitBlocks); |
686 | 364k | auto *LoopHeader = L->getHeader(); |
687 | 364k | assert(LoopHeader && "Invalid loop"); |
688 | 364k | |
689 | 445k | for (auto *ExitBB : ExitBlocks) { |
690 | 445k | BasicBlock::iterator BBI = ExitBB->begin(); |
691 | 445k | // If there are no more PHI nodes in this exit block, then no more |
692 | 445k | // values defined inside the loop are used on this path. |
693 | 662k | while (auto *PN662k = dyn_cast<PHINode>(BBI++)) { |
694 | 216k | for (unsigned IncomingValIdx = 0, E = PN->getNumIncomingValues(); |
695 | 460k | IncomingValIdx != E460k ; ++IncomingValIdx244k ) { |
696 | 244k | auto *IncomingBB = PN->getIncomingBlock(IncomingValIdx); |
697 | 244k | |
698 | 244k | // We currently only support loop exits from loop header. If the |
699 | 244k | // incoming block is not loop header, we need to recursively check |
700 | 244k | // all conditions starting from loop header are loop invariants. |
701 | 244k | // Additional support might be added in the future. |
702 | 244k | if (IncomingBB != LoopHeader) |
703 | 123k | continue; |
704 | 120k | |
705 | 120k | // Get condition that leads to the exit path. |
706 | 120k | auto *TermInst = IncomingBB->getTerminator(); |
707 | 120k | |
708 | 120k | Value *Cond = nullptr; |
709 | 120k | if (auto *BI120k = dyn_cast<BranchInst>(TermInst)) { |
710 | 114k | // Must be a conditional branch, otherwise the block |
711 | 114k | // should not be in the loop. |
712 | 114k | Cond = BI->getCondition(); |
713 | 120k | } else if (auto *5.46k SI5.46k = dyn_cast<SwitchInst>(TermInst)) |
714 | 5.20k | Cond = SI->getCondition(); |
715 | 5.46k | else |
716 | 261 | continue; |
717 | 120k | |
718 | 120k | if (120k !L->isLoopInvariant(Cond)120k ) |
719 | 120k | continue; |
720 | 40 | |
721 | 40 | auto *ExitVal = |
722 | 40 | dyn_cast<PHINode>(PN->getIncomingValue(IncomingValIdx)); |
723 | 40 | |
724 | 40 | // Only deal with PHIs. |
725 | 40 | if (!ExitVal) |
726 | 15 | continue; |
727 | 25 | |
728 | 25 | // If ExitVal is a PHI on the loop header, then we know its |
729 | 25 | // value along this exit because the exit can only be taken |
730 | 25 | // on the first iteration. |
731 | 25 | auto *LoopPreheader = L->getLoopPreheader(); |
732 | 25 | assert(LoopPreheader && "Invalid loop"); |
733 | 25 | int PreheaderIdx = ExitVal->getBasicBlockIndex(LoopPreheader); |
734 | 25 | if (PreheaderIdx != -125 ) { |
735 | 24 | assert(ExitVal->getParent() == LoopHeader && |
736 | 24 | "ExitVal must be in loop header"); |
737 | 24 | PN->setIncomingValue(IncomingValIdx, |
738 | 24 | ExitVal->getIncomingValue(PreheaderIdx)); |
739 | 24 | } |
740 | 244k | } |
741 | 216k | } |
742 | 445k | } |
743 | 364k | } |
744 | | |
745 | | /// Check whether it is possible to delete the loop after rewriting exit |
746 | | /// value. If it is possible, ignore ReplaceExitValue and do rewriting |
747 | | /// aggressively. |
748 | | bool IndVarSimplify::canLoopBeDeleted( |
749 | 141k | Loop *L, SmallVector<RewritePhi, 8> &RewritePhiSet) { |
750 | 141k | |
751 | 141k | BasicBlock *Preheader = L->getLoopPreheader(); |
752 | 141k | // If there is no preheader, the loop will not be deleted. |
753 | 141k | if (!Preheader) |
754 | 0 | return false; |
755 | 141k | |
756 | 141k | // In LoopDeletion pass Loop can be deleted when ExitingBlocks.size() > 1. |
757 | 141k | // We obviate multiple ExitingBlocks case for simplicity. |
758 | 141k | // TODO: If we see testcase with multiple ExitingBlocks can be deleted |
759 | 141k | // after exit value rewriting, we can enhance the logic here. |
760 | 141k | SmallVector<BasicBlock *, 4> ExitingBlocks; |
761 | 141k | L->getExitingBlocks(ExitingBlocks); |
762 | 141k | SmallVector<BasicBlock *, 8> ExitBlocks; |
763 | 141k | L->getUniqueExitBlocks(ExitBlocks); |
764 | 141k | if (ExitBlocks.size() > 1 || 141k ExitingBlocks.size() > 1141k ) |
765 | 25 | return false; |
766 | 141k | |
767 | 141k | BasicBlock *ExitBlock = ExitBlocks[0]; |
768 | 141k | BasicBlock::iterator BI = ExitBlock->begin(); |
769 | 144k | while (PHINode *P144k = dyn_cast<PHINode>(BI)) { |
770 | 24.7k | Value *Incoming = P->getIncomingValueForBlock(ExitingBlocks[0]); |
771 | 24.7k | |
772 | 24.7k | // If the Incoming value of P is found in RewritePhiSet, we know it |
773 | 24.7k | // could be rewritten to use a loop invariant value in transformation |
774 | 24.7k | // phase later. Skip it in the loop invariant check below. |
775 | 24.7k | bool found = false; |
776 | 6.04k | for (const RewritePhi &Phi : RewritePhiSet) { |
777 | 6.04k | unsigned i = Phi.Ith; |
778 | 6.04k | if (Phi.PN == P && 6.04k (Phi.PN)->getIncomingValue(i) == Incoming3.15k ) { |
779 | 3.15k | found = true; |
780 | 3.15k | break; |
781 | 3.15k | } |
782 | 24.7k | } |
783 | 24.7k | |
784 | 24.7k | Instruction *I; |
785 | 24.7k | if (!found && 24.7k (I = dyn_cast<Instruction>(Incoming))21.5k ) |
786 | 21.5k | if (21.5k !L->hasLoopInvariantOperands(I)21.5k ) |
787 | 21.2k | return false; |
788 | 3.50k | |
789 | 3.50k | ++BI; |
790 | 3.50k | } |
791 | 141k | |
792 | 119k | for (auto *BB : L->blocks()) |
793 | 1.02M | if (151k any_of(*BB, [](Instruction &I) 151k { return I.mayHaveSideEffects(); }1.02M )) |
794 | 110k | return false; |
795 | 8.94k | |
796 | 8.94k | return true; |
797 | 8.94k | } |
798 | | |
//===----------------------------------------------------------------------===//
//  IV Widening - Extend the width of an IV to cover its widest uses.
//===----------------------------------------------------------------------===//

803 | | namespace { |
804 | | // Collect information about induction variables that are used by sign/zero |
805 | | // extend operations. This information is recorded by CollectExtend and provides |
806 | | // the input to WidenIV. |
807 | | struct WideIVInfo { |
808 | | PHINode *NarrowIV = nullptr; |
809 | | Type *WidestNativeType = nullptr; // Widest integer type created [sz]ext |
810 | | bool IsSigned = false; // Was a sext user seen before a zext? |
811 | | }; |
812 | | } |
813 | | |
814 | | /// Update information about the induction variable that is extended by this |
815 | | /// sign or zero extend operation. This is used to determine the final width of |
816 | | /// the IV before actually widening it. |
817 | | static void visitIVCast(CastInst *Cast, WideIVInfo &WI, ScalarEvolution *SE, |
818 | 226k | const TargetTransformInfo *TTI) { |
819 | 226k | bool IsSigned = Cast->getOpcode() == Instruction::SExt; |
820 | 226k | if (!IsSigned && 226k Cast->getOpcode() != Instruction::ZExt177k ) |
821 | 167k | return; |
822 | 59.6k | |
823 | 59.6k | Type *Ty = Cast->getType(); |
824 | 59.6k | uint64_t Width = SE->getTypeSizeInBits(Ty); |
825 | 59.6k | if (!Cast->getModule()->getDataLayout().isLegalInteger(Width)) |
826 | 77 | return; |
827 | 59.5k | |
828 | 59.5k | // Check that `Cast` actually extends the induction variable (we rely on this |
829 | 59.5k | // later). This takes care of cases where `Cast` is extending a truncation of |
830 | 59.5k | // the narrow induction variable, and thus can end up being narrower than the |
831 | 59.5k | // "narrow" induction variable. |
832 | 59.5k | uint64_t NarrowIVWidth = SE->getTypeSizeInBits(WI.NarrowIV->getType()); |
833 | 59.5k | if (NarrowIVWidth >= Width) |
834 | 1 | return; |
835 | 59.5k | |
836 | 59.5k | // Cast is either an sext or zext up to this point. |
837 | 59.5k | // We should not widen an indvar if arithmetics on the wider indvar are more |
838 | 59.5k | // expensive than those on the narrower indvar. We check only the cost of ADD |
839 | 59.5k | // because at least an ADD is required to increment the induction variable. We |
840 | 59.5k | // could compute more comprehensively the cost of all instructions on the |
841 | 59.5k | // induction variable when necessary. |
842 | 59.5k | if (59.5k TTI && |
843 | 59.5k | TTI->getArithmeticInstrCost(Instruction::Add, Ty) > |
844 | 59.5k | TTI->getArithmeticInstrCost(Instruction::Add, |
845 | 59.5k | Cast->getOperand(0)->getType())) { |
846 | 3 | return; |
847 | 3 | } |
848 | 59.5k | |
849 | 59.5k | if (59.5k !WI.WidestNativeType59.5k ) { |
850 | 43.7k | WI.WidestNativeType = SE->getEffectiveSCEVType(Ty); |
851 | 43.7k | WI.IsSigned = IsSigned; |
852 | 43.7k | return; |
853 | 43.7k | } |
854 | 15.8k | |
855 | 15.8k | // We extend the IV to satisfy the sign of its first user, arbitrarily. |
856 | 15.8k | if (15.8k WI.IsSigned != IsSigned15.8k ) |
857 | 305 | return; |
858 | 15.5k | |
859 | 15.5k | if (15.5k Width > SE->getTypeSizeInBits(WI.WidestNativeType)15.5k ) |
860 | 84 | WI.WidestNativeType = SE->getEffectiveSCEVType(Ty); |
861 | 226k | } |
862 | | |
863 | | namespace { |
864 | | |
865 | | /// Record a link in the Narrow IV def-use chain along with the WideIV that |
866 | | /// computes the same value as the Narrow IV def. This avoids caching Use* |
867 | | /// pointers. |
868 | | struct NarrowIVDefUse { |
869 | | Instruction *NarrowDef = nullptr; |
870 | | Instruction *NarrowUse = nullptr; |
871 | | Instruction *WideDef = nullptr; |
872 | | |
873 | | // True if the narrow def is never negative. Tracking this information lets |
874 | | // us use a sign extension instead of a zero extension or vice versa, when |
875 | | // profitable and legal. |
876 | | bool NeverNegative = false; |
877 | | |
878 | | NarrowIVDefUse(Instruction *ND, Instruction *NU, Instruction *WD, |
879 | | bool NeverNegative) |
880 | | : NarrowDef(ND), NarrowUse(NU), WideDef(WD), |
881 | 140k | NeverNegative(NeverNegative) {} |
882 | | }; |
883 | | |
884 | | /// The goal of this transform is to remove sign and zero extends without |
885 | | /// creating any new induction variables. To do this, it creates a new phi of |
886 | | /// the wider type and redirects all users, either removing extends or inserting |
887 | | /// truncs whenever we stop propagating the type. |
888 | | /// |
889 | | class WidenIV { |
890 | | // Parameters |
891 | | PHINode *OrigPhi; |
892 | | Type *WideType; |
893 | | |
894 | | // Context |
895 | | LoopInfo *LI; |
896 | | Loop *L; |
897 | | ScalarEvolution *SE; |
898 | | DominatorTree *DT; |
899 | | |
900 | | // Does the module have any calls to the llvm.experimental.guard intrinsic |
901 | | // at all? If not we can avoid scanning instructions looking for guards. |
902 | | bool HasGuards; |
903 | | |
904 | | // Result |
905 | | PHINode *WidePhi; |
906 | | Instruction *WideInc; |
907 | | const SCEV *WideIncExpr; |
908 | | SmallVectorImpl<WeakTrackingVH> &DeadInsts; |
909 | | |
910 | | SmallPtrSet<Instruction *,16> Widened; |
911 | | SmallVector<NarrowIVDefUse, 8> NarrowIVUsers; |
912 | | |
913 | | enum ExtendKind { ZeroExtended, SignExtended, Unknown }; |
914 | | // A map tracking the kind of extension used to widen each narrow IV |
915 | | // and narrow IV user. |
916 | | // Key: pointer to a narrow IV or IV user. |
917 | | // Value: the kind of extension used to widen this Instruction. |
918 | | DenseMap<AssertingVH<Instruction>, ExtendKind> ExtendKindMap; |
919 | | |
920 | | typedef std::pair<AssertingVH<Value>, AssertingVH<Instruction>> DefUserPair; |
921 | | // A map with control-dependent ranges for post increment IV uses. The key is |
922 | | // a pair of IV def and a use of this def denoting the context. The value is |
923 | | // a ConstantRange representing possible values of the def at the given |
924 | | // context. |
925 | | DenseMap<DefUserPair, ConstantRange> PostIncRangeInfos; |
926 | | |
927 | | Optional<ConstantRange> getPostIncRangeInfo(Value *Def, |
928 | 34.9k | Instruction *UseI) { |
929 | 34.9k | DefUserPair Key(Def, UseI); |
930 | 34.9k | auto It = PostIncRangeInfos.find(Key); |
931 | 34.9k | return It == PostIncRangeInfos.end() |
932 | 34.8k | ? Optional<ConstantRange>(None) |
933 | 96 | : Optional<ConstantRange>(It->second); |
934 | 34.9k | } |
935 | | |
936 | | void calculatePostIncRanges(PHINode *OrigPhi); |
937 | | void calculatePostIncRange(Instruction *NarrowDef, Instruction *NarrowUser); |
938 | 215 | void updatePostIncRangeInfo(Value *Def, Instruction *UseI, ConstantRange R) { |
939 | 215 | DefUserPair Key(Def, UseI); |
940 | 215 | auto It = PostIncRangeInfos.find(Key); |
941 | 215 | if (It == PostIncRangeInfos.end()) |
942 | 186 | PostIncRangeInfos.insert({Key, R}); |
943 | 215 | else |
944 | 29 | It->second = R.intersectWith(It->second); |
945 | 215 | } |
946 | | |
947 | | public: |
948 | | WidenIV(const WideIVInfo &WI, LoopInfo *LInfo, ScalarEvolution *SEv, |
949 | | DominatorTree *DTree, SmallVectorImpl<WeakTrackingVH> &DI, |
950 | | bool HasGuards) |
951 | | : OrigPhi(WI.NarrowIV), WideType(WI.WidestNativeType), LI(LInfo), |
952 | | L(LI->getLoopFor(OrigPhi->getParent())), SE(SEv), DT(DTree), |
953 | | HasGuards(HasGuards), WidePhi(nullptr), WideInc(nullptr), |
954 | 43.7k | WideIncExpr(nullptr), DeadInsts(DI) { |
955 | 43.7k | assert(L->getHeader() == OrigPhi->getParent() && "Phi must be an IV"); |
956 | 43.7k | ExtendKindMap[OrigPhi] = WI.IsSigned ? SignExtended34.9k : ZeroExtended8.79k ; |
957 | 43.7k | } |
958 | | |
959 | | PHINode *createWideIV(SCEVExpander &Rewriter); |
960 | | |
961 | | protected: |
962 | | Value *createExtendInst(Value *NarrowOper, Type *WideType, bool IsSigned, |
963 | | Instruction *Use); |
964 | | |
965 | | Instruction *cloneIVUser(NarrowIVDefUse DU, const SCEVAddRecExpr *WideAR); |
966 | | Instruction *cloneArithmeticIVUser(NarrowIVDefUse DU, |
967 | | const SCEVAddRecExpr *WideAR); |
968 | | Instruction *cloneBitwiseIVUser(NarrowIVDefUse DU); |
969 | | |
970 | | ExtendKind getExtendKind(Instruction *I); |
971 | | |
972 | | typedef std::pair<const SCEVAddRecExpr *, ExtendKind> WidenedRecTy; |
973 | | |
974 | | WidenedRecTy getWideRecurrence(NarrowIVDefUse DU); |
975 | | |
976 | | WidenedRecTy getExtendedOperandRecurrence(NarrowIVDefUse DU); |
977 | | |
978 | | const SCEV *getSCEVByOpCode(const SCEV *LHS, const SCEV *RHS, |
979 | | unsigned OpCode) const; |
980 | | |
981 | | Instruction *widenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter); |
982 | | |
983 | | bool widenLoopCompare(NarrowIVDefUse DU); |
984 | | |
985 | | void pushNarrowIVUsers(Instruction *NarrowDef, Instruction *WideDef); |
986 | | }; |
987 | | } // anonymous namespace |
988 | | |
989 | | /// Perform a quick domtree based check for loop invariance assuming that V is |
990 | | /// used within the loop. LoopInfo::isLoopInvariant() seems gratuitous for this |
991 | | /// purpose. |
992 | 341k | static bool isLoopInvariant(Value *V, const Loop *L, const DominatorTree *DT) { |
993 | 341k | Instruction *Inst = dyn_cast<Instruction>(V); |
994 | 341k | if (!Inst) |
995 | 294k | return true; |
996 | 47.1k | |
997 | 47.1k | return DT->properlyDominates(Inst->getParent(), L->getHeader()); |
998 | 47.1k | } |
999 | | |
1000 | | Value *WidenIV::createExtendInst(Value *NarrowOper, Type *WideType, |
1001 | 46.3k | bool IsSigned, Instruction *Use) { |
1002 | 46.3k | // Set the debug location and conservative insertion point. |
1003 | 46.3k | IRBuilder<> Builder(Use); |
1004 | 46.3k | // Hoist the insertion point into loop preheaders as far as possible. |
1005 | 46.3k | for (const Loop *L = LI->getLoopFor(Use->getParent()); |
1006 | 109k | L && 109k L->getLoopPreheader()72.0k && isLoopInvariant(NarrowOper, L, DT)72.0k ; |
1007 | 63.3k | L = L->getParentLoop()) |
1008 | 63.3k | Builder.SetInsertPoint(L->getLoopPreheader()->getTerminator()); |
1009 | 46.3k | |
1010 | 32.4k | return IsSigned ? Builder.CreateSExt(NarrowOper, WideType) : |
1011 | 13.8k | Builder.CreateZExt(NarrowOper, WideType); |
1012 | 46.3k | } |
1013 | | |
1014 | | /// Instantiate a wide operation to replace a narrow operation. This only needs |
1015 | | /// to handle operations that can evaluation to SCEVAddRec. It can safely return |
1016 | | /// 0 for any operation we decide not to clone. |
1017 | | Instruction *WidenIV::cloneIVUser(NarrowIVDefUse DU, |
1018 | 16.1k | const SCEVAddRecExpr *WideAR) { |
1019 | 16.1k | unsigned Opcode = DU.NarrowUse->getOpcode(); |
1020 | 16.1k | switch (Opcode) { |
1021 | 308 | default: |
1022 | 308 | return nullptr; |
1023 | 13.2k | case Instruction::Add: |
1024 | 13.2k | case Instruction::Mul: |
1025 | 13.2k | case Instruction::UDiv: |
1026 | 13.2k | case Instruction::Sub: |
1027 | 13.2k | return cloneArithmeticIVUser(DU, WideAR); |
1028 | 13.2k | |
1029 | 2.64k | case Instruction::And: |
1030 | 2.64k | case Instruction::Or: |
1031 | 2.64k | case Instruction::Xor: |
1032 | 2.64k | case Instruction::Shl: |
1033 | 2.64k | case Instruction::LShr: |
1034 | 2.64k | case Instruction::AShr: |
1035 | 2.64k | return cloneBitwiseIVUser(DU); |
1036 | 0 | } |
1037 | 0 | } |
1038 | | |
1039 | 2.64k | Instruction *WidenIV::cloneBitwiseIVUser(NarrowIVDefUse DU) { |
1040 | 2.64k | Instruction *NarrowUse = DU.NarrowUse; |
1041 | 2.64k | Instruction *NarrowDef = DU.NarrowDef; |
1042 | 2.64k | Instruction *WideDef = DU.WideDef; |
1043 | 2.64k | |
1044 | 2.64k | DEBUG(dbgs() << "Cloning bitwise IVUser: " << *NarrowUse << "\n"); |
1045 | 2.64k | |
1046 | 2.64k | // Replace NarrowDef operands with WideDef. Otherwise, we don't know anything |
1047 | 2.64k | // about the narrow operand yet so must insert a [sz]ext. It is probably loop |
1048 | 2.64k | // invariant and will be folded or hoisted. If it actually comes from a |
1049 | 2.64k | // widened IV, it should be removed during a future call to widenIVUse. |
1050 | 2.64k | bool IsSigned = getExtendKind(NarrowDef) == SignExtended; |
1051 | 2.64k | Value *LHS = (NarrowUse->getOperand(0) == NarrowDef) |
1052 | 2.64k | ? WideDef |
1053 | 0 | : createExtendInst(NarrowUse->getOperand(0), WideType, |
1054 | 0 | IsSigned, NarrowUse); |
1055 | 2.64k | Value *RHS = (NarrowUse->getOperand(1) == NarrowDef) |
1056 | 0 | ? WideDef |
1057 | 2.64k | : createExtendInst(NarrowUse->getOperand(1), WideType, |
1058 | 2.64k | IsSigned, NarrowUse); |
1059 | 2.64k | |
1060 | 2.64k | auto *NarrowBO = cast<BinaryOperator>(NarrowUse); |
1061 | 2.64k | auto *WideBO = BinaryOperator::Create(NarrowBO->getOpcode(), LHS, RHS, |
1062 | 2.64k | NarrowBO->getName()); |
1063 | 2.64k | IRBuilder<> Builder(NarrowUse); |
1064 | 2.64k | Builder.Insert(WideBO); |
1065 | 2.64k | WideBO->copyIRFlags(NarrowBO); |
1066 | 2.64k | return WideBO; |
1067 | 2.64k | } |
1068 | | |
1069 | | Instruction *WidenIV::cloneArithmeticIVUser(NarrowIVDefUse DU, |
1070 | 13.2k | const SCEVAddRecExpr *WideAR) { |
1071 | 13.2k | Instruction *NarrowUse = DU.NarrowUse; |
1072 | 13.2k | Instruction *NarrowDef = DU.NarrowDef; |
1073 | 13.2k | Instruction *WideDef = DU.WideDef; |
1074 | 13.2k | |
1075 | 13.2k | DEBUG(dbgs() << "Cloning arithmetic IVUser: " << *NarrowUse << "\n"); |
1076 | 13.2k | |
1077 | 13.2k | unsigned IVOpIdx = (NarrowUse->getOperand(0) == NarrowDef) ? 011.8k : 11.39k ; |
1078 | 13.2k | |
1079 | 13.2k | // We're trying to find X such that |
1080 | 13.2k | // |
1081 | 13.2k | // Widen(NarrowDef `op` NonIVNarrowDef) == WideAR == WideDef `op.wide` X |
1082 | 13.2k | // |
1083 | 13.2k | // We guess two solutions to X, sext(NonIVNarrowDef) and zext(NonIVNarrowDef), |
1084 | 13.2k | // and check using SCEV if any of them are correct. |
1085 | 13.2k | |
1086 | 13.2k | // Returns true if extending NonIVNarrowDef according to `SignExt` is a |
1087 | 13.2k | // correct solution to X. |
1088 | 13.3k | auto GuessNonIVOperand = [&](bool SignExt) { |
1089 | 13.3k | const SCEV *WideLHS; |
1090 | 13.3k | const SCEV *WideRHS; |
1091 | 13.3k | |
1092 | 13.3k | auto GetExtend = [this, SignExt](const SCEV *S, Type *Ty) { |
1093 | 13.3k | if (SignExt) |
1094 | 12.9k | return SE->getSignExtendExpr(S, Ty); |
1095 | 375 | return SE->getZeroExtendExpr(S, Ty); |
1096 | 375 | }; |
1097 | 13.3k | |
1098 | 13.3k | if (IVOpIdx == 013.3k ) { |
1099 | 11.9k | WideLHS = SE->getSCEV(WideDef); |
1100 | 11.9k | const SCEV *NarrowRHS = SE->getSCEV(NarrowUse->getOperand(1)); |
1101 | 11.9k | WideRHS = GetExtend(NarrowRHS, WideType); |
1102 | 13.3k | } else { |
1103 | 1.40k | const SCEV *NarrowLHS = SE->getSCEV(NarrowUse->getOperand(0)); |
1104 | 1.40k | WideLHS = GetExtend(NarrowLHS, WideType); |
1105 | 1.40k | WideRHS = SE->getSCEV(WideDef); |
1106 | 1.40k | } |
1107 | 13.3k | |
1108 | 13.3k | // WideUse is "WideDef `op.wide` X" as described in the comment. |
1109 | 13.3k | const SCEV *WideUse = nullptr; |
1110 | 13.3k | |
1111 | 13.3k | switch (NarrowUse->getOpcode()) { |
1112 | 0 | default: |
1113 | 0 | llvm_unreachable("No other possibility!"); |
1114 | 13.3k | |
1115 | 11.4k | case Instruction::Add: |
1116 | 11.4k | WideUse = SE->getAddExpr(WideLHS, WideRHS); |
1117 | 11.4k | break; |
1118 | 13.3k | |
1119 | 802 | case Instruction::Mul: |
1120 | 802 | WideUse = SE->getMulExpr(WideLHS, WideRHS); |
1121 | 802 | break; |
1122 | 13.3k | |
1123 | 8 | case Instruction::UDiv: |
1124 | 8 | WideUse = SE->getUDivExpr(WideLHS, WideRHS); |
1125 | 8 | break; |
1126 | 13.3k | |
1127 | 1.07k | case Instruction::Sub: |
1128 | 1.07k | WideUse = SE->getMinusSCEV(WideLHS, WideRHS); |
1129 | 1.07k | break; |
1130 | 13.3k | } |
1131 | 13.3k | |
1132 | 13.3k | return WideUse == WideAR; |
1133 | 13.3k | }; |
1134 | 13.2k | |
1135 | 13.2k | bool SignExtend = getExtendKind(NarrowDef) == SignExtended; |
1136 | 13.2k | if (!GuessNonIVOperand(SignExtend)13.2k ) { |
1137 | 123 | SignExtend = !SignExtend; |
1138 | 123 | if (!GuessNonIVOperand(SignExtend)) |
1139 | 86 | return nullptr; |
1140 | 13.1k | } |
1141 | 13.1k | |
1142 | 13.1k | Value *LHS = (NarrowUse->getOperand(0) == NarrowDef) |
1143 | 11.7k | ? WideDef |
1144 | 1.38k | : createExtendInst(NarrowUse->getOperand(0), WideType, |
1145 | 1.38k | SignExtend, NarrowUse); |
1146 | 13.1k | Value *RHS = (NarrowUse->getOperand(1) == NarrowDef) |
1147 | 1.60k | ? WideDef |
1148 | 11.5k | : createExtendInst(NarrowUse->getOperand(1), WideType, |
1149 | 11.5k | SignExtend, NarrowUse); |
1150 | 13.2k | |
1151 | 13.2k | auto *NarrowBO = cast<BinaryOperator>(NarrowUse); |
1152 | 13.2k | auto *WideBO = BinaryOperator::Create(NarrowBO->getOpcode(), LHS, RHS, |
1153 | 13.2k | NarrowBO->getName()); |
1154 | 13.2k | |
1155 | 13.2k | IRBuilder<> Builder(NarrowUse); |
1156 | 13.2k | Builder.Insert(WideBO); |
1157 | 13.2k | WideBO->copyIRFlags(NarrowBO); |
1158 | 13.2k | return WideBO; |
1159 | 13.2k | } |
1160 | | |
1161 | 156k | WidenIV::ExtendKind WidenIV::getExtendKind(Instruction *I) { |
1162 | 156k | auto It = ExtendKindMap.find(I); |
1163 | 156k | assert(It != ExtendKindMap.end() && "Instruction not yet extended!"); |
1164 | 156k | return It->second; |
1165 | 156k | } |
1166 | | |
1167 | | const SCEV *WidenIV::getSCEVByOpCode(const SCEV *LHS, const SCEV *RHS, |
1168 | 46.2k | unsigned OpCode) const { |
1169 | 46.2k | if (OpCode == Instruction::Add) |
1170 | 44.0k | return SE->getAddExpr(LHS, RHS); |
1171 | 2.12k | if (2.12k OpCode == Instruction::Sub2.12k ) |
1172 | 1.17k | return SE->getMinusSCEV(LHS, RHS); |
1173 | 944 | if (944 OpCode == Instruction::Mul944 ) |
1174 | 944 | return SE->getMulExpr(LHS, RHS); |
1175 | 0 |
|
1176 | 0 | llvm_unreachable0 ("Unsupported opcode."); |
1177 | 0 | } |
1178 | | |
1179 | | /// No-wrap operations can transfer sign extension of their result to their |
1180 | | /// operands. Generate the SCEV value for the widened operation without |
1181 | | /// actually modifying the IR yet. If the expression after extending the |
1182 | | /// operands is an AddRec for this loop, return the AddRec and the kind of |
1183 | | /// extension used. |
1184 | 91.0k | WidenIV::WidenedRecTy WidenIV::getExtendedOperandRecurrence(NarrowIVDefUse DU) { |
1185 | 91.0k | |
1186 | 91.0k | // Handle the common case of add<nsw/nuw> |
1187 | 91.0k | const unsigned OpCode = DU.NarrowUse->getOpcode(); |
1188 | 91.0k | // Only Add/Sub/Mul instructions supported yet. |
1189 | 91.0k | if (OpCode != Instruction::Add && 91.0k OpCode != Instruction::Sub46.3k && |
1190 | 44.8k | OpCode != Instruction::Mul) |
1191 | 43.8k | return {nullptr, Unknown}; |
1192 | 47.1k | |
1193 | 47.1k | // One operand (NarrowDef) has already been extended to WideDef. Now determine |
1194 | 47.1k | // if extending the other will lead to a recurrence. |
1195 | 47.1k | const unsigned ExtendOperIdx = |
1196 | 47.1k | DU.NarrowUse->getOperand(0) == DU.NarrowDef ? 144.6k : 02.53k ; |
1197 | 47.1k | assert(DU.NarrowUse->getOperand(1-ExtendOperIdx) == DU.NarrowDef && "bad DU"); |
1198 | 47.1k | |
1199 | 47.1k | const SCEV *ExtendOperExpr = nullptr; |
1200 | 47.1k | const OverflowingBinaryOperator *OBO = |
1201 | 47.1k | cast<OverflowingBinaryOperator>(DU.NarrowUse); |
1202 | 47.1k | ExtendKind ExtKind = getExtendKind(DU.NarrowDef); |
1203 | 47.1k | if (ExtKind == SignExtended && 47.1k OBO->hasNoSignedWrap()44.5k ) |
1204 | 43.9k | ExtendOperExpr = SE->getSignExtendExpr( |
1205 | 43.9k | SE->getSCEV(DU.NarrowUse->getOperand(ExtendOperIdx)), WideType); |
1206 | 3.25k | else if(3.25k ExtKind == ZeroExtended && 3.25k OBO->hasNoUnsignedWrap()2.67k ) |
1207 | 2.27k | ExtendOperExpr = SE->getZeroExtendExpr( |
1208 | 2.27k | SE->getSCEV(DU.NarrowUse->getOperand(ExtendOperIdx)), WideType); |
1209 | 3.25k | else |
1210 | 978 | return {nullptr, Unknown}; |
1211 | 46.2k | |
1212 | 46.2k | // When creating this SCEV expr, don't apply the current operations NSW or NUW |
1213 | 46.2k | // flags. This instruction may be guarded by control flow that the no-wrap |
1214 | 46.2k | // behavior depends on. Non-control-equivalent instructions can be mapped to |
1215 | 46.2k | // the same SCEV expression, and it would be incorrect to transfer NSW/NUW |
1216 | 46.2k | // semantics to those operations. |
1217 | 46.2k | const SCEV *lhs = SE->getSCEV(DU.WideDef); |
1218 | 46.2k | const SCEV *rhs = ExtendOperExpr; |
1219 | 46.2k | |
1220 | 46.2k | // Let's swap operands to the initial order for the case of non-commutative |
1221 | 46.2k | // operations, like SUB. See PR21014. |
1222 | 46.2k | if (ExtendOperIdx == 0) |
1223 | 2.12k | std::swap(lhs, rhs); |
1224 | 46.2k | const SCEVAddRecExpr *AddRec = |
1225 | 46.2k | dyn_cast<SCEVAddRecExpr>(getSCEVByOpCode(lhs, rhs, OpCode)); |
1226 | 46.2k | |
1227 | 46.2k | if (!AddRec || 46.2k AddRec->getLoop() != L45.3k ) |
1228 | 914 | return {nullptr, Unknown}; |
1229 | 45.2k | |
1230 | 45.2k | return {AddRec, ExtKind}; |
1231 | 45.2k | } |
1232 | | |
1233 | | /// Is this instruction potentially interesting for further simplification after |
1234 | | /// widening it's type? In other words, can the extend be safely hoisted out of |
1235 | | /// the loop with SCEV reducing the value to a recurrence on the same loop. If |
1236 | | /// so, return the extended recurrence and the kind of extension used. Otherwise |
1237 | | /// return {nullptr, Unknown}. |
1238 | 45.7k | WidenIV::WidenedRecTy WidenIV::getWideRecurrence(NarrowIVDefUse DU) { |
1239 | 45.7k | if (!SE->isSCEVable(DU.NarrowUse->getType())) |
1240 | 3.85k | return {nullptr, Unknown}; |
1241 | 41.9k | |
1242 | 41.9k | const SCEV *NarrowExpr = SE->getSCEV(DU.NarrowUse); |
1243 | 41.9k | if (SE->getTypeSizeInBits(NarrowExpr->getType()) >= |
1244 | 41.9k | SE->getTypeSizeInBits(WideType)) { |
1245 | 372 | // NarrowUse implicitly widens its operand. e.g. a gep with a narrow |
1246 | 372 | // index. So don't follow this use. |
1247 | 372 | return {nullptr, Unknown}; |
1248 | 372 | } |
1249 | 41.5k | |
1250 | 41.5k | const SCEV *WideExpr; |
1251 | 41.5k | ExtendKind ExtKind; |
1252 | 41.5k | if (DU.NeverNegative41.5k ) { |
1253 | 30.2k | WideExpr = SE->getSignExtendExpr(NarrowExpr, WideType); |
1254 | 30.2k | if (isa<SCEVAddRecExpr>(WideExpr)) |
1255 | 2.72k | ExtKind = SignExtended; |
1256 | 27.5k | else { |
1257 | 27.5k | WideExpr = SE->getZeroExtendExpr(NarrowExpr, WideType); |
1258 | 27.5k | ExtKind = ZeroExtended; |
1259 | 27.5k | } |
1260 | 41.5k | } else if (11.2k getExtendKind(DU.NarrowDef) == SignExtended11.2k ) { |
1261 | 9.87k | WideExpr = SE->getSignExtendExpr(NarrowExpr, WideType); |
1262 | 9.87k | ExtKind = SignExtended; |
1263 | 11.2k | } else { |
1264 | 1.38k | WideExpr = SE->getZeroExtendExpr(NarrowExpr, WideType); |
1265 | 1.38k | ExtKind = ZeroExtended; |
1266 | 1.38k | } |
1267 | 41.5k | const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(WideExpr); |
1268 | 41.5k | if (!AddRec || 41.5k AddRec->getLoop() != L3.16k ) |
1269 | 38.4k | return {nullptr, Unknown}; |
1270 | 3.14k | return {AddRec, ExtKind}; |
1271 | 3.14k | } |
1272 | | |
1273 | | /// This IV user cannot be widen. Replace this use of the original narrow IV |
1274 | | /// with a truncation of the new wide IV to isolate and eliminate the narrow IV. |
1275 | 12.7k | static void truncateIVUse(NarrowIVDefUse DU, DominatorTree *DT, LoopInfo *LI) { |
1276 | 12.7k | DEBUG(dbgs() << "INDVARS: Truncate IV " << *DU.WideDef |
1277 | 12.7k | << " for user " << *DU.NarrowUse << "\n"); |
1278 | 12.7k | IRBuilder<> Builder( |
1279 | 12.7k | getInsertPointForUses(DU.NarrowUse, DU.NarrowDef, DT, LI)); |
1280 | 12.7k | Value *Trunc = Builder.CreateTrunc(DU.WideDef, DU.NarrowDef->getType()); |
1281 | 12.7k | DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, Trunc); |
1282 | 12.7k | } |
1283 | | |
1284 | | /// If the narrow use is a compare instruction, then widen the compare |
1285 | | // (and possibly the other operand). The extend operation is hoisted into the |
1286 | | // loop preheader as far as possible. |
1287 | 42.6k | bool WidenIV::widenLoopCompare(NarrowIVDefUse DU) { |
1288 | 42.6k | ICmpInst *Cmp = dyn_cast<ICmpInst>(DU.NarrowUse); |
1289 | 42.6k | if (!Cmp) |
1290 | 10.1k | return false; |
1291 | 32.5k | |
1292 | 32.5k | // We can legally widen the comparison in the following two cases: |
1293 | 32.5k | // |
1294 | 32.5k | // - The signedness of the IV extension and comparison match |
1295 | 32.5k | // |
1296 | 32.5k | // - The narrow IV is always positive (and thus its sign extension is equal |
1297 | 32.5k | // to its zero extension). For instance, let's say we're zero extending |
1298 | 32.5k | // %narrow for the following use |
1299 | 32.5k | // |
1300 | 32.5k | // icmp slt i32 %narrow, %val ... (A) |
1301 | 32.5k | // |
1302 | 32.5k | // and %narrow is always positive. Then |
1303 | 32.5k | // |
1304 | 32.5k | // (A) == icmp slt i32 sext(%narrow), sext(%val) |
1305 | 32.5k | // == icmp slt i32 zext(%narrow), sext(%val) |
1306 | 32.5k | bool IsSigned = getExtendKind(DU.NarrowDef) == SignExtended; |
1307 | 32.5k | if (!(DU.NeverNegative || 32.5k IsSigned == Cmp->isSigned()9.08k )) |
1308 | 1.77k | return false; |
1309 | 30.7k | |
1310 | 30.7k | Value *Op = Cmp->getOperand(Cmp->getOperand(0) == DU.NarrowDef ? 30.7k 130.3k : 0408 ); |
1311 | 30.7k | unsigned CastWidth = SE->getTypeSizeInBits(Op->getType()); |
1312 | 30.7k | unsigned IVWidth = SE->getTypeSizeInBits(WideType); |
1313 | 30.7k | assert (CastWidth <= IVWidth && "Unexpected width while widening compare."); |
1314 | 30.7k | |
1315 | 30.7k | // Widen the compare instruction. |
1316 | 30.7k | IRBuilder<> Builder( |
1317 | 30.7k | getInsertPointForUses(DU.NarrowUse, DU.NarrowDef, DT, LI)); |
1318 | 30.7k | DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, DU.WideDef); |
1319 | 30.7k | |
1320 | 30.7k | // Widen the other operand of the compare, if necessary. |
1321 | 30.7k | if (CastWidth < IVWidth30.7k ) { |
1322 | 30.7k | Value *ExtOp = createExtendInst(Op, WideType, Cmp->isSigned(), Cmp); |
1323 | 30.7k | DU.NarrowUse->replaceUsesOfWith(Op, ExtOp); |
1324 | 30.7k | } |
1325 | 42.6k | return true; |
1326 | 42.6k | } |
1327 | | |
1328 | | /// Determine whether an individual user of the narrow IV can be widened. If so, |
1329 | | /// return the wide clone of the user. |
1330 | 140k | Instruction *WidenIV::widenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter) { |
1331 | 140k | assert(ExtendKindMap.count(DU.NarrowDef) && |
1332 | 140k | "Should already know the kind of extension used to widen NarrowDef"); |
1333 | 140k | |
1334 | 140k | // Stop traversing the def-use chain at inner-loop phis or post-loop phis. |
1335 | 140k | if (PHINode *UsePhi140k = dyn_cast<PHINode>(DU.NarrowUse)) { |
1336 | 4.39k | if (LI->getLoopFor(UsePhi->getParent()) != L4.39k ) { |
1337 | 4.00k | // For LCSSA phis, sink the truncate outside the loop. |
1338 | 4.00k | // After SimplifyCFG most loop exit targets have a single predecessor. |
1339 | 4.00k | // Otherwise fall back to a truncate within the loop. |
1340 | 4.00k | if (UsePhi->getNumOperands() != 1) |
1341 | 886 | truncateIVUse(DU, DT, LI); |
1342 | 3.11k | else { |
1343 | 3.11k | // Widening the PHI requires us to insert a trunc. The logical place |
1344 | 3.11k | // for this trunc is in the same BB as the PHI. This is not possible if |
1345 | 3.11k | // the BB is terminated by a catchswitch. |
1346 | 3.11k | if (isa<CatchSwitchInst>(UsePhi->getParent()->getTerminator())) |
1347 | 1 | return nullptr; |
1348 | 3.11k | |
1349 | 3.11k | PHINode *WidePhi = |
1350 | 3.11k | PHINode::Create(DU.WideDef->getType(), 1, UsePhi->getName() + ".wide", |
1351 | 3.11k | UsePhi); |
1352 | 3.11k | WidePhi->addIncoming(DU.WideDef, UsePhi->getIncomingBlock(0)); |
1353 | 3.11k | IRBuilder<> Builder(&*WidePhi->getParent()->getFirstInsertionPt()); |
1354 | 3.11k | Value *Trunc = Builder.CreateTrunc(WidePhi, DU.NarrowDef->getType()); |
1355 | 3.11k | UsePhi->replaceAllUsesWith(Trunc); |
1356 | 3.11k | DeadInsts.emplace_back(UsePhi); |
1357 | 3.11k | DEBUG(dbgs() << "INDVARS: Widen lcssa phi " << *UsePhi |
1358 | 3.11k | << " to " << *WidePhi << "\n"); |
1359 | 3.11k | } |
1360 | 4.00k | return nullptr; |
1361 | 136k | } |
1362 | 4.39k | } |
1363 | 136k | |
1364 | 136k | // This narrow use can be widened by a sext if it's non-negative or its narrow |
1365 | 136k | // def was widended by a sext. Same for zext. |
1366 | 136k | auto canWidenBySExt = [&]() 136k { |
1367 | 10.5k | return DU.NeverNegative || getExtendKind(DU.NarrowDef) == SignExtended; |
1368 | 43.0k | }; |
1369 | 2.63k | auto canWidenByZExt = [&]() { |
1370 | 1.11k | return DU.NeverNegative || getExtendKind(DU.NarrowDef) == ZeroExtended; |
1371 | 2.63k | }; |
1372 | 136k | |
1373 | 136k | // Our raison d'etre! Eliminate sign and zero extension. |
1374 | 136k | if ((isa<SExtInst>(DU.NarrowUse) && 136k canWidenBySExt()43.0k ) || |
1375 | 136k | (isa<ZExtInst>(DU.NarrowUse) && 93.6k canWidenByZExt()2.63k )) { |
1376 | 45.6k | Value *NewDef = DU.WideDef; |
1377 | 45.6k | if (DU.NarrowUse->getType() != WideType45.6k ) { |
1378 | 8 | unsigned CastWidth = SE->getTypeSizeInBits(DU.NarrowUse->getType()); |
1379 | 8 | unsigned IVWidth = SE->getTypeSizeInBits(WideType); |
1380 | 8 | if (CastWidth < IVWidth8 ) { |
1381 | 8 | // The cast isn't as wide as the IV, so insert a Trunc. |
1382 | 8 | IRBuilder<> Builder(DU.NarrowUse); |
1383 | 8 | NewDef = Builder.CreateTrunc(DU.WideDef, DU.NarrowUse->getType()); |
1384 | 8 | } |
1385 | 0 | else { |
1386 | 0 | // A wider extend was hidden behind a narrower one. This may induce |
1387 | 0 | // another round of IV widening in which the intermediate IV becomes |
1388 | 0 | // dead. It should be very rare. |
1389 | 0 | DEBUG(dbgs() << "INDVARS: New IV " << *WidePhi |
1390 | 0 | << " not wide enough to subsume " << *DU.NarrowUse << "\n"); |
1391 | 0 | DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, DU.WideDef); |
1392 | 0 | NewDef = DU.NarrowUse; |
1393 | 0 | } |
1394 | 8 | } |
1395 | 45.6k | if (NewDef != DU.NarrowUse45.6k ) { |
1396 | 45.6k | DEBUG(dbgs() << "INDVARS: eliminating " << *DU.NarrowUse |
1397 | 45.6k | << " replaced by " << *DU.WideDef << "\n"); |
1398 | 45.6k | ++NumElimExt; |
1399 | 45.6k | DU.NarrowUse->replaceAllUsesWith(NewDef); |
1400 | 45.6k | DeadInsts.emplace_back(DU.NarrowUse); |
1401 | 45.6k | } |
1402 | 45.6k | // Now that the extend is gone, we want to expose it's uses for potential |
1403 | 45.6k | // further simplification. We don't need to directly inform SimplifyIVUsers |
1404 | 45.6k | // of the new users, because their parent IV will be processed later as a |
1405 | 45.6k | // new loop phi. If we preserved IVUsers analysis, we would also want to |
1406 | 45.6k | // push the uses of WideDef here. |
1407 | 45.6k | |
1408 | 45.6k | // No further widening is needed. The deceased [sz]ext had done it for us. |
1409 | 45.6k | return nullptr; |
1410 | 45.6k | } |
1411 | 91.0k | |
1412 | 91.0k | // Does this user itself evaluate to a recurrence after widening? |
1413 | 91.0k | WidenedRecTy WideAddRec = getExtendedOperandRecurrence(DU); |
1414 | 91.0k | if (!WideAddRec.first) |
1415 | 45.7k | WideAddRec = getWideRecurrence(DU); |
1416 | 91.0k | |
1417 | 91.0k | assert((WideAddRec.first == nullptr) == (WideAddRec.second == Unknown)); |
1418 | 91.0k | if (!WideAddRec.first91.0k ) { |
1419 | 42.6k | // If use is a loop condition, try to promote the condition instead of |
1420 | 42.6k | // truncating the IV first. |
1421 | 42.6k | if (widenLoopCompare(DU)) |
1422 | 30.7k | return nullptr; |
1423 | 11.8k | |
1424 | 11.8k | // This user does not evaluate to a recurrence after widening, so don't |
1425 | 11.8k | // follow it. Instead insert a Trunc to kill off the original use, |
1426 | 11.8k | // eventually isolating the original narrow IV so it can be removed. |
1427 | 11.8k | truncateIVUse(DU, DT, LI); |
1428 | 11.8k | return nullptr; |
1429 | 11.8k | } |
1430 | 48.4k | // Assume block terminators cannot evaluate to a recurrence. We can't to |
1431 | 48.4k | // insert a Trunc after a terminator if there happens to be a critical edge. |
1432 | 91.0k | assert(DU.NarrowUse != DU.NarrowUse->getParent()->getTerminator() && |
1433 | 48.4k | "SCEV is not expected to evaluate a block terminator"); |
1434 | 48.4k | |
1435 | 48.4k | // Reuse the IV increment that SCEVExpander created as long as it dominates |
1436 | 48.4k | // NarrowUse. |
1437 | 48.4k | Instruction *WideUse = nullptr; |
1438 | 48.4k | if (WideAddRec.first == WideIncExpr && |
1439 | 32.5k | Rewriter.hoistIVInc(WideInc, DU.NarrowUse)) |
1440 | 32.2k | WideUse = WideInc; |
1441 | 16.1k | else { |
1442 | 16.1k | WideUse = cloneIVUser(DU, WideAddRec.first); |
1443 | 16.1k | if (!WideUse) |
1444 | 394 | return nullptr; |
1445 | 48.0k | } |
1446 | 48.0k | // Evaluation of WideAddRec ensured that the narrow expression could be |
1447 | 48.0k | // extended outside the loop without overflow. This suggests that the wide use |
1448 | 48.0k | // evaluates to the same expression as the extended narrow use, but doesn't |
1449 | 48.0k | // absolutely guarantee it. Hence the following failsafe check. In rare cases |
1450 | 48.0k | // where it fails, we simply throw away the newly created wide use. |
1451 | 48.0k | if (48.0k WideAddRec.first != SE->getSCEV(WideUse)48.0k ) { |
1452 | 130 | DEBUG(dbgs() << "Wide use expression mismatch: " << *WideUse |
1453 | 130 | << ": " << *SE->getSCEV(WideUse) << " != " << *WideAddRec.first << "\n"); |
1454 | 130 | DeadInsts.emplace_back(WideUse); |
1455 | 130 | return nullptr; |
1456 | 130 | } |
1457 | 47.9k | |
1458 | 47.9k | ExtendKindMap[DU.NarrowUse] = WideAddRec.second; |
1459 | 47.9k | // Returning WideUse pushes it on the worklist. |
1460 | 47.9k | return WideUse; |
1461 | 47.9k | } |
1462 | | |
1463 | | /// Add eligible users of NarrowDef to NarrowIVUsers. |
1464 | | /// |
1465 | 80.3k | void WidenIV::pushNarrowIVUsers(Instruction *NarrowDef, Instruction *WideDef) { |
1466 | 80.3k | const SCEV *NarrowSCEV = SE->getSCEV(NarrowDef); |
1467 | 80.3k | bool NonNegativeDef = |
1468 | 80.3k | SE->isKnownPredicate(ICmpInst::ICMP_SGE, NarrowSCEV, |
1469 | 80.3k | SE->getConstant(NarrowSCEV->getType(), 0)); |
1470 | 174k | for (User *U : NarrowDef->users()) { |
1471 | 174k | Instruction *NarrowUser = cast<Instruction>(U); |
1472 | 174k | |
1473 | 174k | // Handle data flow merges and bizarre phi cycles. |
1474 | 174k | if (!Widened.insert(NarrowUser).second) |
1475 | 33.2k | continue; |
1476 | 140k | |
1477 | 140k | bool NonNegativeUse = false; |
1478 | 140k | if (!NonNegativeDef140k ) { |
1479 | 34.9k | // We might have a control-dependent range information for this context. |
1480 | 34.9k | if (auto RangeInfo = getPostIncRangeInfo(NarrowDef, NarrowUser)) |
1481 | 96 | NonNegativeUse = RangeInfo->getSignedMin().isNonNegative(); |
1482 | 34.9k | } |
1483 | 140k | |
1484 | 140k | NarrowIVUsers.emplace_back(NarrowDef, NarrowUser, WideDef, |
1485 | 34.9k | NonNegativeDef || NonNegativeUse); |
1486 | 174k | } |
1487 | 80.3k | } |
1488 | | |
1489 | | /// Process a single induction variable. First use the SCEVExpander to create a |
1490 | | /// wide induction variable that evaluates to the same recurrence as the |
1491 | | /// original narrow IV. Then use a worklist to forward traverse the narrow IV's |
1492 | | /// def-use chain. After widenIVUse has processed all interesting IV users, the |
1493 | | /// narrow IV will be isolated for removal by DeleteDeadPHIs. |
1494 | | /// |
1495 | | /// It would be simpler to delete uses as they are processed, but we must avoid |
1496 | | /// invalidating SCEV expressions. |
1497 | | /// |
1498 | 43.7k | PHINode *WidenIV::createWideIV(SCEVExpander &Rewriter) { |
1499 | 43.7k | // Is this phi an induction variable? |
1500 | 43.7k | const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(OrigPhi)); |
1501 | 43.7k | if (!AddRec) |
1502 | 5.98k | return nullptr; |
1503 | 37.7k | |
1504 | 37.7k | // Widen the induction variable expression. |
1505 | 37.7k | const SCEV *WideIVExpr = getExtendKind(OrigPhi) == SignExtended |
1506 | 31.0k | ? SE->getSignExtendExpr(AddRec, WideType) |
1507 | 6.64k | : SE->getZeroExtendExpr(AddRec, WideType); |
1508 | 37.7k | |
1509 | 37.7k | assert(SE->getEffectiveSCEVType(WideIVExpr->getType()) == WideType && |
1510 | 37.7k | "Expect the new IV expression to preserve its type"); |
1511 | 37.7k | |
1512 | 37.7k | // Can the IV be extended outside the loop without overflow? |
1513 | 37.7k | AddRec = dyn_cast<SCEVAddRecExpr>(WideIVExpr); |
1514 | 37.7k | if (!AddRec || 37.7k AddRec->getLoop() != L32.4k ) |
1515 | 5.30k | return nullptr; |
1516 | 32.4k | |
1517 | 32.4k | // An AddRec must have loop-invariant operands. Since this AddRec is |
1518 | 32.4k | // materialized by a loop header phi, the expression cannot have any post-loop |
1519 | 32.4k | // operands, so they must dominate the loop header. |
1520 | 37.7k | assert( |
1521 | 32.4k | SE->properlyDominates(AddRec->getStart(), L->getHeader()) && |
1522 | 32.4k | SE->properlyDominates(AddRec->getStepRecurrence(*SE), L->getHeader()) && |
1523 | 32.4k | "Loop header phi recurrence inputs do not dominate the loop"); |
1524 | 32.4k | |
1525 | 32.4k | // Iterate over IV uses (including transitive ones) looking for IV increments |
1526 | 32.4k | // of the form 'add nsw %iv, <const>'. For each increment and each use of |
1527 | 32.4k | // the increment calculate control-dependent range information basing on |
1528 | 32.4k | // dominating conditions inside of the loop (e.g. a range check inside of the |
1529 | 32.4k | // loop). Calculated ranges are stored in PostIncRangeInfos map. |
1530 | 32.4k | // |
1531 | 32.4k | // Control-dependent range information is later used to prove that a narrow |
1532 | 32.4k | // definition is not negative (see pushNarrowIVUsers). It's difficult to do |
1533 | 32.4k | // this on demand because when pushNarrowIVUsers needs this information some |
1534 | 32.4k | // of the dominating conditions might be already widened. |
1535 | 32.4k | if (UsePostIncrementRanges) |
1536 | 32.4k | calculatePostIncRanges(OrigPhi); |
1537 | 32.4k | |
1538 | 32.4k | // The rewriter provides a value for the desired IV expression. This may |
1539 | 32.4k | // either find an existing phi or materialize a new one. Either way, we |
1540 | 32.4k | // expect a well-formed cyclic phi-with-increments. i.e. any operand not part |
1541 | 32.4k | // of the phi-SCC dominates the loop entry. |
1542 | 32.4k | Instruction *InsertPt = &L->getHeader()->front(); |
1543 | 32.4k | WidePhi = cast<PHINode>(Rewriter.expandCodeFor(AddRec, WideType, InsertPt)); |
1544 | 32.4k | |
1545 | 32.4k | // Remembering the WideIV increment generated by SCEVExpander allows |
1546 | 32.4k | // widenIVUse to reuse it when widening the narrow IV's increment. We don't |
1547 | 32.4k | // employ a general reuse mechanism because the call above is the only call to |
1548 | 32.4k | // SCEVExpander. Henceforth, we produce 1-to-1 narrow to wide uses. |
1549 | 32.4k | if (BasicBlock *LatchBlock32.4k = L->getLoopLatch()) { |
1550 | 32.4k | WideInc = |
1551 | 32.4k | cast<Instruction>(WidePhi->getIncomingValueForBlock(LatchBlock)); |
1552 | 32.4k | WideIncExpr = SE->getSCEV(WideInc); |
1553 | 32.4k | // Propagate the debug location associated with the original loop increment |
1554 | 32.4k | // to the new (widened) increment. |
1555 | 32.4k | auto *OrigInc = |
1556 | 32.4k | cast<Instruction>(OrigPhi->getIncomingValueForBlock(LatchBlock)); |
1557 | 32.4k | WideInc->setDebugLoc(OrigInc->getDebugLoc()); |
1558 | 32.4k | } |
1559 | 32.4k | |
1560 | 32.4k | DEBUG(dbgs() << "Wide IV: " << *WidePhi << "\n"); |
1561 | 32.4k | ++NumWidened; |
1562 | 32.4k | |
1563 | 32.4k | // Traverse the def-use chain using a worklist starting at the original IV. |
1564 | 32.4k | assert(Widened.empty() && NarrowIVUsers.empty() && "expect initial state" ); |
1565 | 32.4k | |
1566 | 32.4k | Widened.insert(OrigPhi); |
1567 | 32.4k | pushNarrowIVUsers(OrigPhi, WidePhi); |
1568 | 32.4k | |
1569 | 173k | while (!NarrowIVUsers.empty()173k ) { |
1570 | 140k | NarrowIVDefUse DU = NarrowIVUsers.pop_back_val(); |
1571 | 140k | |
1572 | 140k | // Process a def-use edge. This may replace the use, so don't hold a |
1573 | 140k | // use_iterator across it. |
1574 | 140k | Instruction *WideUse = widenIVUse(DU, Rewriter); |
1575 | 140k | |
1576 | 140k | // Follow all def-use edges from the previous narrow use. |
1577 | 140k | if (WideUse) |
1578 | 47.9k | pushNarrowIVUsers(DU.NarrowUse, WideUse); |
1579 | 140k | |
1580 | 140k | // widenIVUse may have removed the def-use edge. |
1581 | 140k | if (DU.NarrowDef->use_empty()) |
1582 | 1.92k | DeadInsts.emplace_back(DU.NarrowDef); |
1583 | 140k | } |
1584 | 43.7k | return WidePhi; |
1585 | 43.7k | } |
1586 | | |
1587 | | /// Calculates control-dependent range for the given def at the given context |
1588 | | /// by looking at dominating conditions inside of the loop |
1589 | | void WidenIV::calculatePostIncRange(Instruction *NarrowDef, |
1590 | 941k | Instruction *NarrowUser) { |
1591 | 941k | using namespace llvm::PatternMatch; |
1592 | 941k | |
1593 | 941k | Value *NarrowDefLHS; |
1594 | 941k | const APInt *NarrowDefRHS; |
1595 | 941k | if (!match(NarrowDef, m_NSWAdd(m_Value(NarrowDefLHS), |
1596 | 941k | m_APInt(NarrowDefRHS))) || |
1597 | 41.4k | !NarrowDefRHS->isNonNegative()) |
1598 | 903k | return; |
1599 | 37.2k | |
1600 | 37.2k | auto UpdateRangeFromCondition = [&] (Value *Condition, |
1601 | 23.5k | bool TrueDest) { |
1602 | 23.5k | CmpInst::Predicate Pred; |
1603 | 23.5k | Value *CmpRHS; |
1604 | 23.5k | if (!match(Condition, m_ICmp(Pred, m_Specific(NarrowDefLHS), |
1605 | 23.5k | m_Value(CmpRHS)))) |
1606 | 23.3k | return; |
1607 | 215 | |
1608 | 215 | CmpInst::Predicate P = |
1609 | 215 | TrueDest ? Pred76 : CmpInst::getInversePredicate(Pred)139 ; |
1610 | 23.5k | |
1611 | 23.5k | auto CmpRHSRange = SE->getSignedRange(SE->getSCEV(CmpRHS)); |
1612 | 23.5k | auto CmpConstrainedLHSRange = |
1613 | 23.5k | ConstantRange::makeAllowedICmpRegion(P, CmpRHSRange); |
1614 | 23.5k | auto NarrowDefRange = |
1615 | 23.5k | CmpConstrainedLHSRange.addWithNoSignedWrap(*NarrowDefRHS); |
1616 | 23.5k | |
1617 | 23.5k | updatePostIncRangeInfo(NarrowDef, NarrowUser, NarrowDefRange); |
1618 | 23.5k | }; |
1619 | 37.2k | |
1620 | 107k | auto UpdateRangeFromGuards = [&](Instruction *Ctx) { |
1621 | 107k | if (!HasGuards) |
1622 | 107k | return; |
1623 | 26 | |
1624 | 26 | for (Instruction &I : make_range(Ctx->getIterator().getReverse(), |
1625 | 98 | Ctx->getParent()->rend())) { |
1626 | 98 | Value *C = nullptr; |
1627 | 98 | if (match(&I, m_Intrinsic<Intrinsic::experimental_guard>(m_Value(C)))) |
1628 | 5 | UpdateRangeFromCondition(C, /*TrueDest=*/true); |
1629 | 98 | } |
1630 | 107k | }; |
1631 | 37.2k | |
1632 | 37.2k | UpdateRangeFromGuards(NarrowUser); |
1633 | 37.2k | |
1634 | 37.2k | BasicBlock *NarrowUserBB = NarrowUser->getParent(); |
1635 | 37.2k | // If NarrowUserBB is statically unreachable asking dominator queries may |
1636 | 37.2k | // yield surprising results. (e.g. the block may not have a dom tree node) |
1637 | 37.2k | if (!DT->isReachableFromEntry(NarrowUserBB)) |
1638 | 0 | return; |
1639 | 37.2k | |
1640 | 37.2k | for (auto *DTB = (*DT)[NarrowUserBB]->getIDom(); |
1641 | 107k | L->contains(DTB->getBlock()); |
1642 | 70.2k | DTB = DTB->getIDom()70.2k ) { |
1643 | 70.2k | auto *BB = DTB->getBlock(); |
1644 | 70.2k | auto *TI = BB->getTerminator(); |
1645 | 70.2k | UpdateRangeFromGuards(TI); |
1646 | 70.2k | |
1647 | 70.2k | auto *BI = dyn_cast<BranchInst>(TI); |
1648 | 70.2k | if (!BI || 70.2k !BI->isConditional()66.5k ) |
1649 | 31.2k | continue; |
1650 | 38.9k | |
1651 | 38.9k | auto *TrueSuccessor = BI->getSuccessor(0); |
1652 | 38.9k | auto *FalseSuccessor = BI->getSuccessor(1); |
1653 | 38.9k | |
1654 | 77.9k | auto DominatesNarrowUser = [this, NarrowUser] (BasicBlockEdge BBE) { |
1655 | 77.9k | return BBE.isSingleEdge() && |
1656 | 77.9k | DT->dominates(BBE, NarrowUser->getParent()); |
1657 | 77.9k | }; |
1658 | 38.9k | |
1659 | 38.9k | if (DominatesNarrowUser(BasicBlockEdge(BB, TrueSuccessor))) |
1660 | 15.2k | UpdateRangeFromCondition(BI->getCondition(), /*TrueDest=*/true); |
1661 | 38.9k | |
1662 | 38.9k | if (DominatesNarrowUser(BasicBlockEdge(BB, FalseSuccessor))) |
1663 | 8.36k | UpdateRangeFromCondition(BI->getCondition(), /*TrueDest=*/false); |
1664 | 70.2k | } |
1665 | 941k | } |
1666 | | |
1667 | | /// Calculates PostIncRangeInfos map for the given IV |
1668 | 32.4k | void WidenIV::calculatePostIncRanges(PHINode *OrigPhi) { |
1669 | 32.4k | SmallPtrSet<Instruction *, 16> Visited; |
1670 | 32.4k | SmallVector<Instruction *, 6> Worklist; |
1671 | 32.4k | Worklist.push_back(OrigPhi); |
1672 | 32.4k | Visited.insert(OrigPhi); |
1673 | 32.4k | |
1674 | 1.00M | while (!Worklist.empty()1.00M ) { |
1675 | 973k | Instruction *NarrowDef = Worklist.pop_back_val(); |
1676 | 973k | |
1677 | 1.14M | for (Use &U : NarrowDef->uses()) { |
1678 | 1.14M | auto *NarrowUser = cast<Instruction>(U.getUser()); |
1679 | 1.14M | |
1680 | 1.14M | // Don't go looking outside the current loop. |
1681 | 1.14M | auto *NarrowUserLoop = (*LI)[NarrowUser->getParent()]; |
1682 | 1.14M | if (!NarrowUserLoop || 1.14M !L->contains(NarrowUserLoop)1.13M ) |
1683 | 14.3k | continue; |
1684 | 1.13M | |
1685 | 1.13M | if (1.13M !Visited.insert(NarrowUser).second1.13M ) |
1686 | 189k | continue; |
1687 | 941k | |
1688 | 941k | Worklist.push_back(NarrowUser); |
1689 | 941k | |
1690 | 941k | calculatePostIncRange(NarrowDef, NarrowUser); |
1691 | 941k | } |
1692 | 973k | } |
1693 | 32.4k | } |
1694 | | |
1695 | | //===----------------------------------------------------------------------===// |
1696 | | // Live IV Reduction - Minimize IVs live across the loop. |
1697 | | //===----------------------------------------------------------------------===// |
1698 | | |
1699 | | |
1700 | | //===----------------------------------------------------------------------===// |
1701 | | // Simplification of IV users based on SCEV evaluation. |
1702 | | //===----------------------------------------------------------------------===// |
1703 | | |
1704 | | namespace { |
1705 | | class IndVarSimplifyVisitor : public IVVisitor { |
1706 | | ScalarEvolution *SE; |
1707 | | const TargetTransformInfo *TTI; |
1708 | | PHINode *IVPhi; |
1709 | | |
1710 | | public: |
1711 | | WideIVInfo WI; |
1712 | | |
1713 | | IndVarSimplifyVisitor(PHINode *IV, ScalarEvolution *SCEV, |
1714 | | const TargetTransformInfo *TTI, |
1715 | | const DominatorTree *DTree) |
1716 | 519k | : SE(SCEV), TTI(TTI), IVPhi(IV) { |
1717 | 519k | DT = DTree; |
1718 | 519k | WI.NarrowIV = IVPhi; |
1719 | 519k | } |
1720 | | |
1721 | | // Implement the interface used by simplifyUsersOfIV. |
1722 | 226k | void visitCast(CastInst *Cast) override { visitIVCast(Cast, WI, SE, TTI); } |
1723 | | }; |
1724 | | } |
1725 | | |
1726 | | /// Iteratively perform simplification on a worklist of IV users. Each |
1727 | | /// successive simplification may push more users which may themselves be |
1728 | | /// candidates for simplification. |
1729 | | /// |
1730 | | /// Sign/Zero extend elimination is interleaved with IV simplification. |
1731 | | /// |
1732 | | void IndVarSimplify::simplifyAndExtend(Loop *L, |
1733 | | SCEVExpander &Rewriter, |
1734 | 364k | LoopInfo *LI) { |
1735 | 364k | SmallVector<WideIVInfo, 8> WideIVs; |
1736 | 364k | |
1737 | 364k | auto *GuardDecl = L->getBlocks()[0]->getModule()->getFunction( |
1738 | 364k | Intrinsic::getName(Intrinsic::experimental_guard)); |
1739 | 15 | bool HasGuards = GuardDecl && !GuardDecl->use_empty(); |
1740 | 364k | |
1741 | 364k | SmallVector<PHINode*, 8> LoopPhis; |
1742 | 851k | for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I)851k ; ++I487k ) { |
1743 | 487k | LoopPhis.push_back(cast<PHINode>(I)); |
1744 | 487k | } |
1745 | 364k | // Each round of simplification iterates through the SimplifyIVUsers worklist |
1746 | 364k | // for all current phis, then determines whether any IVs can be |
1747 | 364k | // widened. Widening adds new phis to LoopPhis, inducing another round of |
1748 | 364k | // simplification on the wide IVs. |
1749 | 732k | while (!LoopPhis.empty()732k ) { |
1750 | 368k | // Evaluate as many IV expressions as possible before widening any IVs. This |
1751 | 368k | // forces SCEV to set no-wrap flags before evaluating sign/zero |
1752 | 368k | // extension. The first time SCEV attempts to normalize sign/zero extension, |
1753 | 368k | // the result becomes final. So for the most predictable results, we delay |
1754 | 368k | // evaluation of sign/zero extend evaluation until needed, and avoid running |
1755 | 368k | // other SCEV based analysis prior to simplifyAndExtend. |
1756 | 519k | do { |
1757 | 519k | PHINode *CurrIV = LoopPhis.pop_back_val(); |
1758 | 519k | |
1759 | 519k | // Information about sign/zero extensions of CurrIV. |
1760 | 519k | IndVarSimplifyVisitor Visitor(CurrIV, SE, TTI, DT); |
1761 | 519k | |
1762 | 519k | Changed |= simplifyUsersOfIV(CurrIV, SE, DT, LI, DeadInsts, &Visitor); |
1763 | 519k | |
1764 | 519k | if (Visitor.WI.WidestNativeType519k ) { |
1765 | 43.7k | WideIVs.push_back(Visitor.WI); |
1766 | 43.7k | } |
1767 | 519k | } while(!LoopPhis.empty()); |
1768 | 368k | |
1769 | 412k | for (; !WideIVs.empty()412k ; WideIVs.pop_back()43.7k ) { |
1770 | 43.7k | WidenIV Widener(WideIVs.back(), LI, SE, DT, DeadInsts, HasGuards); |
1771 | 43.7k | if (PHINode *WidePhi43.7k = Widener.createWideIV(Rewriter)) { |
1772 | 32.4k | Changed = true; |
1773 | 32.4k | LoopPhis.push_back(WidePhi); |
1774 | 32.4k | } |
1775 | 43.7k | } |
1776 | 368k | } |
1777 | 364k | } |
1778 | | |
1779 | | //===----------------------------------------------------------------------===// |
1780 | | // linearFunctionTestReplace and its kin. Rewrite the loop exit condition. |
1781 | | //===----------------------------------------------------------------------===// |
1782 | | |
1783 | | /// Return true if this loop's backedge taken count expression can be safely and |
1784 | | /// cheaply expanded into an instruction sequence that can be used by |
1785 | | /// linearFunctionTestReplace. |
1786 | | /// |
1787 | | /// TODO: This fails for pointer-type loop counters with greater than one byte |
1788 | | /// strides, consequently preventing LFTR from running. For the purpose of LFTR |
1789 | | /// we could skip this check in the case that the LFTR loop counter (chosen by |
1790 | | /// FindLoopCounter) is also pointer type. Instead, we could directly convert |
1791 | | /// the loop test to an inequality test by checking the target data's alignment |
1792 | | /// of element types (given that the initial pointer value originates from or is |
1793 | | /// used by ABI constrained operation, as opposed to inttoptr/ptrtoint). |
1794 | | /// However, we don't yet have a strong motivation for converting loop tests |
1795 | | /// into inequality tests. |
1796 | | static bool canExpandBackedgeTakenCount(Loop *L, ScalarEvolution *SE, |
1797 | 364k | SCEVExpander &Rewriter) { |
1798 | 364k | const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L); |
1799 | 364k | if (isa<SCEVCouldNotCompute>(BackedgeTakenCount) || |
1800 | 141k | BackedgeTakenCount->isZero()) |
1801 | 223k | return false; |
1802 | 140k | |
1803 | 140k | if (140k !L->getExitingBlock()140k ) |
1804 | 11 | return false; |
1805 | 140k | |
1806 | 140k | // Can't rewrite non-branch yet. |
1807 | 140k | if (140k !isa<BranchInst>(L->getExitingBlock()->getTerminator())140k ) |
1808 | 0 | return false; |
1809 | 140k | |
1810 | 140k | if (140k Rewriter.isHighCostExpansion(BackedgeTakenCount, L)140k ) |
1811 | 20.8k | return false; |
1812 | 119k | |
1813 | 119k | return true; |
1814 | 119k | } |
1815 | | |
1816 | | /// Return the loop header phi IFF IncV adds a loop invariant value to the phi. |
1817 | 220k | static PHINode *getLoopPhiForCounter(Value *IncV, Loop *L, DominatorTree *DT) { |
1818 | 220k | Instruction *IncI = dyn_cast<Instruction>(IncV); |
1819 | 220k | if (!IncI) |
1820 | 669 | return nullptr; |
1821 | 219k | |
1822 | 219k | switch (IncI->getOpcode()) { |
1823 | 183k | case Instruction::Add: |
1824 | 183k | case Instruction::Sub: |
1825 | 183k | break; |
1826 | 6.50k | case Instruction::GetElementPtr: |
1827 | 6.50k | // An IV counter must preserve its type. |
1828 | 6.50k | if (IncI->getNumOperands() == 2) |
1829 | 6.50k | break; |
1830 | 2 | LLVM_FALLTHROUGH2 ; |
1831 | 29.5k | default: |
1832 | 29.5k | return nullptr; |
1833 | 189k | } |
1834 | 189k | |
1835 | 189k | PHINode *Phi = dyn_cast<PHINode>(IncI->getOperand(0)); |
1836 | 189k | if (Phi && 189k Phi->getParent() == L->getHeader()188k ) { |
1837 | 188k | if (isLoopInvariant(IncI->getOperand(1), L, DT)) |
1838 | 188k | return Phi; |
1839 | 0 | return nullptr; |
1840 | 0 | } |
1841 | 1.48k | if (1.48k IncI->getOpcode() == Instruction::GetElementPtr1.48k ) |
1842 | 13 | return nullptr; |
1843 | 1.47k | |
1844 | 1.47k | // Allow add/sub to be commuted. |
1845 | 1.47k | Phi = dyn_cast<PHINode>(IncI->getOperand(1)); |
1846 | 1.47k | if (Phi && 1.47k Phi->getParent() == L->getHeader()6 ) { |
1847 | 0 | if (isLoopInvariant(IncI->getOperand(0), L, DT)) |
1848 | 0 | return Phi; |
1849 | 1.47k | } |
1850 | 1.47k | return nullptr; |
1851 | 1.47k | } |
1852 | | |
1853 | | /// Return the compare guarding the loop latch, or NULL for unrecognized tests. |
1854 | 123k | static ICmpInst *getLoopTest(Loop *L) { |
1855 | 123k | assert(L->getExitingBlock() && "expected loop exit"); |
1856 | 123k | |
1857 | 123k | BasicBlock *LatchBlock = L->getLoopLatch(); |
1858 | 123k | // Don't bother with LFTR if the loop is not properly simplified. |
1859 | 123k | if (!LatchBlock) |
1860 | 0 | return nullptr; |
1861 | 123k | |
1862 | 123k | BranchInst *BI = dyn_cast<BranchInst>(L->getExitingBlock()->getTerminator()); |
1863 | 123k | assert(BI && "expected exit branch"); |
1864 | 123k | |
1865 | 123k | return dyn_cast<ICmpInst>(BI->getCondition()); |
1866 | 123k | } |
1867 | | |
1868 | | /// linearFunctionTestReplace policy. Return true unless we can show that the |
1869 | | /// current exit test is already sufficiently canonical. |
1870 | 119k | static bool needsLFTR(Loop *L, DominatorTree *DT) { |
1871 | 119k | // Do LFTR to simplify the exit condition to an ICMP. |
1872 | 119k | ICmpInst *Cond = getLoopTest(L); |
1873 | 119k | if (!Cond) |
1874 | 21 | return true; |
1875 | 119k | |
1876 | 119k | // Do LFTR to simplify the exit ICMP to EQ/NE |
1877 | 119k | ICmpInst::Predicate Pred = Cond->getPredicate(); |
1878 | 119k | if (Pred != ICmpInst::ICMP_NE && 119k Pred != ICmpInst::ICMP_EQ119k ) |
1879 | 38.1k | return true; |
1880 | 81.4k | |
1881 | 81.4k | // Look for a loop invariant RHS |
1882 | 81.4k | Value *LHS = Cond->getOperand(0); |
1883 | 81.4k | Value *RHS = Cond->getOperand(1); |
1884 | 81.4k | if (!isLoopInvariant(RHS, L, DT)81.4k ) { |
1885 | 54 | if (!isLoopInvariant(LHS, L, DT)) |
1886 | 0 | return true; |
1887 | 54 | std::swap(LHS, RHS); |
1888 | 54 | } |
1889 | 81.4k | // Look for a simple IV counter LHS |
1890 | 81.4k | PHINode *Phi = dyn_cast<PHINode>(LHS); |
1891 | 81.4k | if (!Phi) |
1892 | 81.0k | Phi = getLoopPhiForCounter(LHS, L, DT); |
1893 | 81.4k | |
1894 | 81.4k | if (!Phi) |
1895 | 26.3k | return true; |
1896 | 55.0k | |
1897 | 55.0k | // Do LFTR if PHI node is defined in the loop, but is *not* a counter. |
1898 | 55.0k | int Idx = Phi->getBasicBlockIndex(L->getLoopLatch()); |
1899 | 55.0k | if (Idx < 0) |
1900 | 1 | return true; |
1901 | 55.0k | |
1902 | 55.0k | // Do LFTR if the exit condition's IV is *not* a simple counter. |
1903 | 55.0k | Value *IncV = Phi->getIncomingValue(Idx); |
1904 | 55.0k | return Phi != getLoopPhiForCounter(IncV, L, DT); |
1905 | 55.0k | } |
1906 | | |
1907 | | /// Recursive helper for hasConcreteDef(). Unfortunately, this currently boils |
1908 | | /// down to checking that all operands are constant and listing instructions |
1909 | | /// that may hide undef. |
1910 | | static bool hasConcreteDefImpl(Value *V, SmallPtrSetImpl<Value*> &Visited, |
1911 | 313k | unsigned Depth) { |
1912 | 313k | if (isa<Constant>(V)) |
1913 | 144k | return !isa<UndefValue>(V); |
1914 | 168k | |
1915 | 168k | if (168k Depth >= 6168k ) |
1916 | 1.17k | return false; |
1917 | 167k | |
1918 | 167k | // Conservatively handle non-constant non-instructions. For example, Arguments |
1919 | 167k | // may be undef. |
1920 | 167k | Instruction *I = dyn_cast<Instruction>(V); |
1921 | 167k | if (!I) |
1922 | 552 | return false; |
1923 | 166k | |
1924 | 166k | // Load and return values may be undef. |
1925 | 166k | if(166k I->mayReadFromMemory() || 166k isa<CallInst>(I)164k || isa<InvokeInst>(I)164k ) |
1926 | 2.28k | return false; |
1927 | 164k | |
1928 | 164k | // Optimistically handle other instructions. |
1929 | 164k | for (Value *Op : I->operands()) 164k { |
1930 | 317k | if (!Visited.insert(Op).second) |
1931 | 80.9k | continue; |
1932 | 237k | if (237k !hasConcreteDefImpl(Op, Visited, Depth+1)237k ) |
1933 | 12.6k | return false; |
1934 | 151k | } |
1935 | 151k | return true; |
1936 | 151k | } |
1937 | | |
1938 | | /// Return true if the given value is concrete. We must prove that undef can |
1939 | | /// never reach it. |
1940 | | /// |
1941 | | /// TODO: If we decide that this is a good approach to checking for undef, we |
1942 | | /// may factor it into a common location. |
1943 | 76.1k | static bool hasConcreteDef(Value *V) { |
1944 | 76.1k | SmallPtrSet<Value*, 8> Visited; |
1945 | 76.1k | Visited.insert(V); |
1946 | 76.1k | return hasConcreteDefImpl(V, Visited, 0); |
1947 | 76.1k | } |
1948 | | |
1949 | | /// Return true if this IV has any uses other than the (soon to be rewritten) |
1950 | | /// loop exit test. |
1951 | 32.1k | static bool AlmostDeadIV(PHINode *Phi, BasicBlock *LatchBlock, Value *Cond) { |
1952 | 32.1k | int LatchIdx = Phi->getBasicBlockIndex(LatchBlock); |
1953 | 32.1k | Value *IncV = Phi->getIncomingValue(LatchIdx); |
1954 | 32.1k | |
1955 | 32.1k | for (User *U : Phi->users()) |
1956 | 44.7k | if (44.7k U != Cond && 44.7k U != IncV35.7k ) return false31.8k ; |
1957 | 298 | |
1958 | 298 | for (User *U : IncV->users()) |
1959 | 370 | if (370 U != Cond && 370 U != Phi330 ) return false148 ; |
1960 | 150 | return true; |
1961 | 150 | } |
1962 | | |
1963 | | /// Find an affine IV in canonical form. |
1964 | | /// |
1965 | | /// BECount may be an i8* pointer type. The pointer difference is already |
1966 | | /// valid count without scaling the address stride, so it remains a pointer |
1967 | | /// expression as far as SCEV is concerned. |
1968 | | /// |
1969 | | /// Currently only valid for LFTR. See the comments on hasConcreteDef below. |
1970 | | /// |
1971 | | /// FIXME: Accept -1 stride and set IVLimit = IVInit - BECount |
1972 | | /// |
1973 | | /// FIXME: Accept non-unit stride as long as SCEV can reduce BECount * Stride. |
1974 | | /// This is difficult in general for SCEV because of potential overflow. But we |
1975 | | /// could at least handle constant BECounts. |
1976 | | static PHINode *FindLoopCounter(Loop *L, const SCEV *BECount, |
1977 | 64.6k | ScalarEvolution *SE, DominatorTree *DT) { |
1978 | 64.6k | uint64_t BCWidth = SE->getTypeSizeInBits(BECount->getType()); |
1979 | 64.6k | |
1980 | 64.6k | Value *Cond = |
1981 | 64.6k | cast<BranchInst>(L->getExitingBlock()->getTerminator())->getCondition(); |
1982 | 64.6k | |
1983 | 64.6k | // Loop over all of the PHI nodes, looking for a simple counter. |
1984 | 64.6k | PHINode *BestPhi = nullptr; |
1985 | 64.6k | const SCEV *BestInit = nullptr; |
1986 | 64.6k | BasicBlock *LatchBlock = L->getLoopLatch(); |
1987 | 64.6k | assert(LatchBlock && "needsLFTR should guarantee a loop latch"); |
1988 | 64.6k | const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); |
1989 | 64.6k | |
1990 | 170k | for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I)170k ; ++I105k ) { |
1991 | 105k | PHINode *Phi = cast<PHINode>(I); |
1992 | 105k | if (!SE->isSCEVable(Phi->getType())) |
1993 | 2.85k | continue; |
1994 | 102k | |
1995 | 102k | // Avoid comparing an integer IV against a pointer Limit. |
1996 | 102k | if (102k BECount->getType()->isPointerTy() && 102k !Phi->getType()->isPointerTy()71 ) |
1997 | 28 | continue; |
1998 | 102k | |
1999 | 102k | const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Phi)); |
2000 | 102k | if (!AR || 102k AR->getLoop() != L93.3k || !AR->isAffine()93.3k ) |
2001 | 9.19k | continue; |
2002 | 93.3k | |
2003 | 93.3k | // AR may be a pointer type, while BECount is an integer type. |
2004 | 93.3k | // AR may be wider than BECount. With eq/ne tests overflow is immaterial. |
2005 | 93.3k | // AR may not be a narrower type, or we may never exit. |
2006 | 93.3k | uint64_t PhiWidth = SE->getTypeSizeInBits(AR->getType()); |
2007 | 93.3k | if (PhiWidth < BCWidth || 93.3k !DL.isLegalInteger(PhiWidth)93.2k ) |
2008 | 276 | continue; |
2009 | 93.0k | |
2010 | 93.0k | const SCEV *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE)); |
2011 | 93.0k | if (!Step || 93.0k !Step->isOne()91.0k ) |
2012 | 16.3k | continue; |
2013 | 76.6k | |
2014 | 76.6k | int LatchIdx = Phi->getBasicBlockIndex(LatchBlock); |
2015 | 76.6k | Value *IncV = Phi->getIncomingValue(LatchIdx); |
2016 | 76.6k | if (getLoopPhiForCounter(IncV, L, DT) != Phi) |
2017 | 561 | continue; |
2018 | 76.1k | |
2019 | 76.1k | // Avoid reusing a potentially undef value to compute other values that may |
2020 | 76.1k | // have originally had a concrete definition. |
2021 | 76.1k | if (76.1k !hasConcreteDef(Phi)76.1k ) { |
2022 | 4.01k | // We explicitly allow unknown phis as long as they are already used by |
2023 | 4.01k | // the loop test. In this case we assume that performing LFTR could not |
2024 | 4.01k | // increase the number of undef users. |
2025 | 4.01k | if (ICmpInst *Cond4.01k = getLoopTest(L)) { |
2026 | 4.01k | if (Phi != getLoopPhiForCounter(Cond->getOperand(0), L, DT) && |
2027 | 4.01k | Phi != getLoopPhiForCounter(Cond->getOperand(1), L, DT)3.21k ) { |
2028 | 3.21k | continue; |
2029 | 3.21k | } |
2030 | 72.9k | } |
2031 | 4.01k | } |
2032 | 72.9k | const SCEV *Init = AR->getStart(); |
2033 | 72.9k | |
2034 | 72.9k | if (BestPhi && 72.9k !AlmostDeadIV(BestPhi, LatchBlock, Cond)16.1k ) { |
2035 | 16.0k | // Don't force a live loop counter if another IV can be used. |
2036 | 16.0k | if (AlmostDeadIV(Phi, LatchBlock, Cond)) |
2037 | 91 | continue; |
2038 | 15.9k | |
2039 | 15.9k | // Prefer to count-from-zero. This is a more "canonical" counter form. It |
2040 | 15.9k | // also prefers integer to pointer IVs. |
2041 | 15.9k | if (15.9k BestInit->isZero() != Init->isZero()15.9k ) { |
2042 | 291 | if (BestInit->isZero()) |
2043 | 231 | continue; |
2044 | 15.9k | } |
2045 | 15.9k | // If two IVs both count from zero or both count from nonzero then the |
2046 | 15.9k | // narrower is likely a dead phi that has been widened. Use the wider phi |
2047 | 15.9k | // to allow the other to be eliminated. |
2048 | 15.6k | else if (15.6k PhiWidth <= SE->getTypeSizeInBits(BestPhi->getType())15.6k ) |
2049 | 15.6k | continue; |
2050 | 56.9k | } |
2051 | 56.9k | BestPhi = Phi; |
2052 | 56.9k | BestInit = Init; |
2053 | 56.9k | } |
2054 | 64.6k | return BestPhi; |
2055 | 64.6k | } |
2056 | | |
2057 | | /// Help linearFunctionTestReplace by generating a value that holds the RHS of |
2058 | | /// the new loop test. |
2059 | | static Value *genLoopLimit(PHINode *IndVar, const SCEV *IVCount, Loop *L, |
2060 | 56.7k | SCEVExpander &Rewriter, ScalarEvolution *SE) { |
2061 | 56.7k | const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(IndVar)); |
2062 | 56.7k | assert(AR && AR->getLoop() == L && AR->isAffine() && "bad loop counter"); |
2063 | 56.7k | const SCEV *IVInit = AR->getStart(); |
2064 | 56.7k | |
2065 | 56.7k | // IVInit may be a pointer while IVCount is an integer when FindLoopCounter |
2066 | 56.7k | // finds a valid pointer IV. Sign extend BECount in order to materialize a |
2067 | 56.7k | // GEP. Avoid running SCEVExpander on a new pointer value, instead reusing |
2068 | 56.7k | // the existing GEPs whenever possible. |
2069 | 56.7k | if (IndVar->getType()->isPointerTy() && 56.7k !IVCount->getType()->isPointerTy()152 ) { |
2070 | 114 | // IVOffset will be the new GEP offset that is interpreted by GEP as a |
2071 | 114 | // signed value. IVCount on the other hand represents the loop trip count, |
2072 | 114 | // which is an unsigned value. FindLoopCounter only allows induction |
2073 | 114 | // variables that have a positive unit stride of one. This means we don't |
2074 | 114 | // have to handle the case of negative offsets (yet) and just need to zero |
2075 | 114 | // extend IVCount. |
2076 | 114 | Type *OfsTy = SE->getEffectiveSCEVType(IVInit->getType()); |
2077 | 114 | const SCEV *IVOffset = SE->getTruncateOrZeroExtend(IVCount, OfsTy); |
2078 | 114 | |
2079 | 114 | // Expand the code for the iteration count. |
2080 | 114 | assert(SE->isLoopInvariant(IVOffset, L) && |
2081 | 114 | "Computed iteration count is not loop invariant!"); |
2082 | 114 | BranchInst *BI = cast<BranchInst>(L->getExitingBlock()->getTerminator()); |
2083 | 114 | Value *GEPOffset = Rewriter.expandCodeFor(IVOffset, OfsTy, BI); |
2084 | 114 | |
2085 | 114 | Value *GEPBase = IndVar->getIncomingValueForBlock(L->getLoopPreheader()); |
2086 | 114 | assert(AR->getStart() == SE->getSCEV(GEPBase) && "bad loop counter"); |
2087 | 114 | // We could handle pointer IVs other than i8*, but we need to compensate for |
2088 | 114 | // gep index scaling. See canExpandBackedgeTakenCount comments. |
2089 | 114 | assert(SE->getSizeOfExpr(IntegerType::getInt64Ty(IndVar->getContext()), |
2090 | 114 | cast<PointerType>(GEPBase->getType()) |
2091 | 114 | ->getElementType())->isOne() && |
2092 | 114 | "unit stride pointer IV must be i8*"); |
2093 | 114 | |
2094 | 114 | IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); |
2095 | 114 | return Builder.CreateGEP(nullptr, GEPBase, GEPOffset, "lftr.limit"); |
2096 | 0 | } else { |
2097 | 56.6k | // In any other case, convert both IVInit and IVCount to integers before |
2098 | 56.6k | // comparing. This may result in SCEV expansion of pointers, but in practice |
2099 | 56.6k | // SCEV will fold the pointer arithmetic away as such: |
2100 | 56.6k | // BECount = (IVEnd - IVInit - 1) => IVLimit = IVInit (postinc). |
2101 | 56.6k | // |
2102 | 56.6k | // Valid Cases: (1) both integers is most common; (2) both may be pointers |
2103 | 56.6k | // for simple memset-style loops. |
2104 | 56.6k | // |
2105 | 56.6k | // IVInit integer and IVCount pointer would only occur if a canonical IV |
2106 | 56.6k | // were generated on top of case #2, which is not expected. |
2107 | 56.6k | |
2108 | 56.6k | const SCEV *IVLimit = nullptr; |
2109 | 56.6k | // For unit stride, IVCount = Start + BECount with 2's complement overflow. |
2110 | 56.6k | // For non-zero Start, compute IVCount here. |
2111 | 56.6k | if (AR->getStart()->isZero()) |
2112 | 52.8k | IVLimit = IVCount; |
2113 | 3.87k | else { |
2114 | 3.87k | assert(AR->getStepRecurrence(*SE)->isOne() && "only handles unit stride"); |
2115 | 3.87k | const SCEV *IVInit = AR->getStart(); |
2116 | 3.87k | |
2117 | 3.87k | // For integer IVs, truncate the IV before computing IVInit + BECount. |
2118 | 3.87k | if (SE->getTypeSizeInBits(IVInit->getType()) |
2119 | 3.87k | > SE->getTypeSizeInBits(IVCount->getType())) |
2120 | 2.80k | IVInit = SE->getTruncateExpr(IVInit, IVCount->getType()); |
2121 | 3.87k | |
2122 | 3.87k | IVLimit = SE->getAddExpr(IVInit, IVCount); |
2123 | 3.87k | } |
2124 | 56.6k | // Expand the code for the iteration count. |
2125 | 56.6k | BranchInst *BI = cast<BranchInst>(L->getExitingBlock()->getTerminator()); |
2126 | 56.6k | IRBuilder<> Builder(BI); |
2127 | 56.6k | assert(SE->isLoopInvariant(IVLimit, L) && |
2128 | 56.6k | "Computed iteration count is not loop invariant!"); |
2129 | 56.6k | // Ensure that we generate the same type as IndVar, or a smaller integer |
2130 | 56.6k | // type. In the presence of null pointer values, we have an integer type |
2131 | 56.6k | // SCEV expression (IVInit) for a pointer type IV value (IndVar). |
2132 | 56.6k | Type *LimitTy = IVCount->getType()->isPointerTy() ? |
2133 | 56.6k | IndVar->getType()38 : IVCount->getType()56.6k ; |
2134 | 56.6k | return Rewriter.expandCodeFor(IVLimit, LimitTy, BI); |
2135 | 56.6k | } |
2136 | 0 | } |
2137 | | |
2138 | | /// This method rewrites the exit condition of the loop to be a canonical != |
2139 | | /// comparison against the incremented loop induction variable. This pass is |
2140 | | /// able to rewrite the exit tests of any loop where the SCEV analysis can |
2141 | | /// determine a loop-invariant trip count of the loop, which is actually a much |
2142 | | /// broader range than just linear tests. |
2143 | | Value *IndVarSimplify:: |
2144 | | linearFunctionTestReplace(Loop *L, |
2145 | | const SCEV *BackedgeTakenCount, |
2146 | | PHINode *IndVar, |
2147 | 56.7k | SCEVExpander &Rewriter) { |
2148 | 56.7k | assert(canExpandBackedgeTakenCount(L, SE, Rewriter) && "precondition"); |
2149 | 56.7k | |
2150 | 56.7k | // Initialize CmpIndVar and IVCount to their preincremented values. |
2151 | 56.7k | Value *CmpIndVar = IndVar; |
2152 | 56.7k | const SCEV *IVCount = BackedgeTakenCount; |
2153 | 56.7k | |
2154 | 56.7k | assert(L->getLoopLatch() && "Loop no longer in simplified form?"); |
2155 | 56.7k | |
2156 | 56.7k | // If the exiting block is the same as the backedge block, we prefer to |
2157 | 56.7k | // compare against the post-incremented value, otherwise we must compare |
2158 | 56.7k | // against the preincremented value. |
2159 | 56.7k | if (L->getExitingBlock() == L->getLoopLatch()56.7k ) { |
2160 | 56.7k | // Add one to the "backedge-taken" count to get the trip count. |
2161 | 56.7k | // This addition may overflow, which is valid as long as the comparison is |
2162 | 56.7k | // truncated to BackedgeTakenCount->getType(). |
2163 | 56.7k | IVCount = SE->getAddExpr(BackedgeTakenCount, |
2164 | 56.7k | SE->getOne(BackedgeTakenCount->getType())); |
2165 | 56.7k | // The BackedgeTaken expression contains the number of times that the |
2166 | 56.7k | // backedge branches to the loop header. This is one less than the |
2167 | 56.7k | // number of times the loop executes, so use the incremented indvar. |
2168 | 56.7k | CmpIndVar = IndVar->getIncomingValueForBlock(L->getExitingBlock()); |
2169 | 56.7k | } |
2170 | 56.7k | |
2171 | 56.7k | Value *ExitCnt = genLoopLimit(IndVar, IVCount, L, Rewriter, SE); |
2172 | 56.7k | assert(ExitCnt->getType()->isPointerTy() == |
2173 | 56.7k | IndVar->getType()->isPointerTy() && |
2174 | 56.7k | "genLoopLimit missed a cast"); |
2175 | 56.7k | |
2176 | 56.7k | // Insert a new icmp_ne or icmp_eq instruction before the branch. |
2177 | 56.7k | BranchInst *BI = cast<BranchInst>(L->getExitingBlock()->getTerminator()); |
2178 | 56.7k | ICmpInst::Predicate P; |
2179 | 56.7k | if (L->contains(BI->getSuccessor(0))) |
2180 | 35.8k | P = ICmpInst::ICMP_NE; |
2181 | 56.7k | else |
2182 | 20.9k | P = ICmpInst::ICMP_EQ; |
2183 | 56.7k | |
2184 | 56.7k | DEBUG(dbgs() << "INDVARS: Rewriting loop exit condition to:\n" |
2185 | 56.7k | << " LHS:" << *CmpIndVar << '\n' |
2186 | 56.7k | << " op:\t" |
2187 | 56.7k | << (P == ICmpInst::ICMP_NE ? "!=" : "==") << "\n" |
2188 | 56.7k | << " RHS:\t" << *ExitCnt << "\n" |
2189 | 56.7k | << " IVCount:\t" << *IVCount << "\n"); |
2190 | 56.7k | |
2191 | 56.7k | IRBuilder<> Builder(BI); |
2192 | 56.7k | |
2193 | 56.7k | // The new loop exit condition should reuse the debug location of the |
2194 | 56.7k | // original loop exit condition. |
2195 | 56.7k | if (auto *Cond = dyn_cast<Instruction>(BI->getCondition())) |
2196 | 56.7k | Builder.SetCurrentDebugLocation(Cond->getDebugLoc()); |
2197 | 56.7k | |
2198 | 56.7k | // LFTR can ignore IV overflow and truncate to the width of |
2199 | 56.7k | // BECount. This avoids materializing the add(zext(add)) expression. |
2200 | 56.7k | unsigned CmpIndVarSize = SE->getTypeSizeInBits(CmpIndVar->getType()); |
2201 | 56.7k | unsigned ExitCntSize = SE->getTypeSizeInBits(ExitCnt->getType()); |
2202 | 56.7k | if (CmpIndVarSize > ExitCntSize56.7k ) { |
2203 | 36.6k | const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(SE->getSCEV(IndVar)); |
2204 | 36.6k | const SCEV *ARStart = AR->getStart(); |
2205 | 36.6k | const SCEV *ARStep = AR->getStepRecurrence(*SE); |
2206 | 36.6k | // For constant IVCount, avoid truncation. |
2207 | 36.6k | if (isa<SCEVConstant>(ARStart) && 36.6k isa<SCEVConstant>(IVCount)35.6k ) { |
2208 | 9.88k | const APInt &Start = cast<SCEVConstant>(ARStart)->getAPInt(); |
2209 | 9.88k | APInt Count = cast<SCEVConstant>(IVCount)->getAPInt(); |
2210 | 9.88k | // Note that the post-inc value of BackedgeTakenCount may have overflowed |
2211 | 9.88k | // above such that IVCount is now zero. |
2212 | 9.88k | if (IVCount != BackedgeTakenCount && 9.88k Count == 09.88k ) { |
2213 | 0 | Count = APInt::getMaxValue(Count.getBitWidth()).zext(CmpIndVarSize); |
2214 | 0 | ++Count; |
2215 | 0 | } |
2216 | 9.88k | else |
2217 | 9.88k | Count = Count.zext(CmpIndVarSize); |
2218 | 9.88k | APInt NewLimit; |
2219 | 9.88k | if (cast<SCEVConstant>(ARStep)->getValue()->isNegative()) |
2220 | 0 | NewLimit = Start - Count; |
2221 | 9.88k | else |
2222 | 9.88k | NewLimit = Start + Count; |
2223 | 9.88k | ExitCnt = ConstantInt::get(CmpIndVar->getType(), NewLimit); |
2224 | 9.88k | |
2225 | 9.88k | DEBUG(dbgs() << " Widen RHS:\t" << *ExitCnt << "\n"); |
2226 | 36.6k | } else { |
2227 | 26.7k | // We try to extend trip count first. If that doesn't work we truncate IV. |
2228 | 26.7k | // Zext(trunc(IV)) == IV implies equivalence of the following two: |
2229 | 26.7k | // Trunc(IV) == ExitCnt and IV == zext(ExitCnt). Similarly for sext. If |
2230 | 26.7k | // one of the two holds, extend the trip count, otherwise we truncate IV. |
2231 | 26.7k | bool Extended = false; |
2232 | 26.7k | const SCEV *IV = SE->getSCEV(CmpIndVar); |
2233 | 26.7k | const SCEV *ZExtTrunc = |
2234 | 26.7k | SE->getZeroExtendExpr(SE->getTruncateExpr(SE->getSCEV(CmpIndVar), |
2235 | 26.7k | ExitCnt->getType()), |
2236 | 26.7k | CmpIndVar->getType()); |
2237 | 26.7k | |
2238 | 26.7k | if (ZExtTrunc == IV26.7k ) { |
2239 | 26.0k | Extended = true; |
2240 | 26.0k | ExitCnt = Builder.CreateZExt(ExitCnt, IndVar->getType(), |
2241 | 26.0k | "wide.trip.count"); |
2242 | 26.7k | } else { |
2243 | 748 | const SCEV *SExtTrunc = |
2244 | 748 | SE->getSignExtendExpr(SE->getTruncateExpr(SE->getSCEV(CmpIndVar), |
2245 | 748 | ExitCnt->getType()), |
2246 | 748 | CmpIndVar->getType()); |
2247 | 748 | if (SExtTrunc == IV748 ) { |
2248 | 537 | Extended = true; |
2249 | 537 | ExitCnt = Builder.CreateSExt(ExitCnt, IndVar->getType(), |
2250 | 537 | "wide.trip.count"); |
2251 | 537 | } |
2252 | 748 | } |
2253 | 26.7k | |
2254 | 26.7k | if (!Extended) |
2255 | 211 | CmpIndVar = Builder.CreateTrunc(CmpIndVar, ExitCnt->getType(), |
2256 | 211 | "lftr.wideiv"); |
2257 | 26.7k | } |
2258 | 36.6k | } |
2259 | 56.7k | Value *Cond = Builder.CreateICmp(P, CmpIndVar, ExitCnt, "exitcond"); |
2260 | 56.7k | Value *OrigCond = BI->getCondition(); |
2261 | 56.7k | // It's tempting to use replaceAllUsesWith here to fully replace the old |
2262 | 56.7k | // comparison, but that's not immediately safe, since users of the old |
2263 | 56.7k | // comparison may not be dominated by the new comparison. Instead, just |
2264 | 56.7k | // update the branch to use the new comparison; in the common case this |
2265 | 56.7k | // will make old comparison dead. |
2266 | 56.7k | BI->setCondition(Cond); |
2267 | 56.7k | DeadInsts.push_back(OrigCond); |
2268 | 56.7k | |
2269 | 56.7k | ++NumLFTR; |
2270 | 56.7k | Changed = true; |
2271 | 56.7k | return Cond; |
2272 | 56.7k | } |
2273 | | |
2274 | | //===----------------------------------------------------------------------===// |
2275 | | // sinkUnusedInvariants. A late subpass to cleanup loop preheaders. |
2276 | | //===----------------------------------------------------------------------===// |
2277 | | |
2278 | | /// If there's a single exit block, sink any loop-invariant values that |
2279 | | /// were defined in the preheader but not used inside the loop into the |
2280 | | /// exit block to reduce register pressure in the loop. |
2281 | 364k | void IndVarSimplify::sinkUnusedInvariants(Loop *L) { |
2282 | 364k | BasicBlock *ExitBlock = L->getExitBlock(); |
2283 | 364k | if (!ExitBlock364k ) return89.6k ; |
2284 | 274k | |
2285 | 274k | BasicBlock *Preheader = L->getLoopPreheader(); |
2286 | 274k | if (!Preheader274k ) return0 ; |
2287 | 274k | |
2288 | 274k | BasicBlock::iterator InsertPt = ExitBlock->getFirstInsertionPt(); |
2289 | 274k | BasicBlock::iterator I(Preheader->getTerminator()); |
2290 | 627k | while (I != Preheader->begin()627k ) { |
2291 | 384k | --I; |
2292 | 384k | // New instructions were inserted at the end of the preheader. |
2293 | 384k | if (isa<PHINode>(I)) |
2294 | 31.1k | break; |
2295 | 353k | |
2296 | 353k | // Don't move instructions which might have side effects, since the side |
2297 | 353k | // effects need to complete before instructions inside the loop. Also don't |
2298 | 353k | // move instructions which might read memory, since the loop may modify |
2299 | 353k | // memory. Note that it's okay if the instruction might have undefined |
2300 | 353k | // behavior: LoopSimplify guarantees that the preheader dominates the exit |
2301 | 353k | // block. |
2302 | 353k | if (353k I->mayHaveSideEffects() || 353k I->mayReadFromMemory()310k ) |
2303 | 82.4k | continue; |
2304 | 271k | |
2305 | 271k | // Skip debug info intrinsics. |
2306 | 271k | if (271k isa<DbgInfoIntrinsic>(I)271k ) |
2307 | 8 | continue; |
2308 | 271k | |
2309 | 271k | // Skip eh pad instructions. |
2310 | 271k | if (271k I->isEHPad()271k ) |
2311 | 1 | continue; |
2312 | 271k | |
2313 | 271k | // Don't sink alloca: we never want to sink static alloca's out of the |
2314 | 271k | // entry block, and correctly sinking dynamic alloca's requires |
2315 | 271k | // checks for stacksave/stackrestore intrinsics. |
2316 | 271k | // FIXME: Refactor this check somehow? |
2317 | 271k | if (271k isa<AllocaInst>(I)271k ) |
2318 | 4.37k | continue; |
2319 | 266k | |
2320 | 266k | // Determine if there is a use in or before the loop (direct or |
2321 | 266k | // otherwise). |
2322 | 266k | bool UsedInLoop = false; |
2323 | 362k | for (Use &U : I->uses()) { |
2324 | 362k | Instruction *User = cast<Instruction>(U.getUser()); |
2325 | 362k | BasicBlock *UseBB = User->getParent(); |
2326 | 362k | if (PHINode *P362k = dyn_cast<PHINode>(User)) { |
2327 | 27.1k | unsigned i = |
2328 | 27.1k | PHINode::getIncomingValueNumForOperand(U.getOperandNo()); |
2329 | 27.1k | UseBB = P->getIncomingBlock(i); |
2330 | 27.1k | } |
2331 | 362k | if (UseBB == Preheader || 362k L->contains(UseBB)224k ) { |
2332 | 257k | UsedInLoop = true; |
2333 | 257k | break; |
2334 | 257k | } |
2335 | 266k | } |
2336 | 266k | |
2337 | 266k | // If there is, the def must remain in the preheader. |
2338 | 266k | if (UsedInLoop) |
2339 | 257k | continue; |
2340 | 9.44k | |
2341 | 9.44k | // Otherwise, sink it to the exit block. |
2342 | 9.44k | Instruction *ToMove = &*I; |
2343 | 9.44k | bool Done = false; |
2344 | 9.44k | |
2345 | 9.44k | if (I != Preheader->begin()9.44k ) { |
2346 | 8.98k | // Skip debug info intrinsics. |
2347 | 8.98k | do { |
2348 | 8.98k | --I; |
2349 | 8.98k | } while (isa<DbgInfoIntrinsic>(I) && 8.98k I != Preheader->begin()0 ); |
2350 | 8.98k | |
2351 | 8.98k | if (isa<DbgInfoIntrinsic>(I) && 8.98k I == Preheader->begin()0 ) |
2352 | 0 | Done = true; |
2353 | 9.44k | } else { |
2354 | 467 | Done = true; |
2355 | 467 | } |
2356 | 9.44k | |
2357 | 9.44k | ToMove->moveBefore(*ExitBlock, InsertPt); |
2358 | 9.44k | if (Done9.44k ) break467 ; |
2359 | 8.98k | InsertPt = ToMove->getIterator(); |
2360 | 8.98k | } |
2361 | 364k | } |
2362 | | |
2363 | | //===----------------------------------------------------------------------===// |
2364 | | // IndVarSimplify driver. Manage several subpasses of IV simplification. |
2365 | | //===----------------------------------------------------------------------===// |
2366 | | |
2367 | 364k | bool IndVarSimplify::run(Loop *L) { |
2368 | 364k | // We need (and expect!) the incoming loop to be in LCSSA. |
2369 | 364k | assert(L->isRecursivelyLCSSAForm(*DT, *LI) && |
2370 | 364k | "LCSSA required to run indvars!"); |
2371 | 364k | |
2372 | 364k | // If LoopSimplify form is not available, stay out of trouble. Some notes: |
2373 | 364k | // - LSR currently only supports LoopSimplify-form loops. Indvars' |
2374 | 364k | // canonicalization can be a pessimization without LSR to "clean up" |
2375 | 364k | // afterwards. |
2376 | 364k | // - We depend on having a preheader; in particular, |
2377 | 364k | // Loop::getCanonicalInductionVariable only supports loops with preheaders, |
2378 | 364k | // and we're in trouble if we can't find the induction variable even when |
2379 | 364k | // we've manually inserted one. |
2380 | 364k | // - LFTR relies on having a single backedge. |
2381 | 364k | if (!L->isLoopSimplifyForm()) |
2382 | 6 | return false; |
2383 | 364k | |
2384 | 364k | // If there are any floating-point recurrences, attempt to |
2385 | 364k | // transform them to use integer recurrences. |
2386 | 364k | rewriteNonIntegerIVs(L); |
2387 | 364k | |
2388 | 364k | const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L); |
2389 | 364k | |
2390 | 364k | // Create a rewriter object which we'll use to transform the code with. |
2391 | 364k | SCEVExpander Rewriter(*SE, DL, "indvars"); |
2392 | | #ifndef NDEBUG |
2393 | | Rewriter.setDebugType(DEBUG_TYPE); |
2394 | | #endif |
2395 | | |
2396 | 364k | // Eliminate redundant IV users. |
2397 | 364k | // |
2398 | 364k | // Simplification works best when run before other consumers of SCEV. We |
2399 | 364k | // attempt to avoid evaluating SCEVs for sign/zero extend operations until |
2400 | 364k | // other expressions involving loop IVs have been evaluated. This helps SCEV |
2401 | 364k | // set no-wrap flags before normalizing sign/zero extension. |
2402 | 364k | Rewriter.disableCanonicalMode(); |
2403 | 364k | simplifyAndExtend(L, Rewriter, LI); |
2404 | 364k | |
2405 | 364k | // Check to see if this loop has a computable loop-invariant execution count. |
2406 | 364k | // If so, this means that we can compute the final value of any expressions |
2407 | 364k | // that are recurrent in the loop, and substitute the exit values from the |
2408 | 364k | // loop into any instructions outside of the loop that use the final values of |
2409 | 364k | // the current expressions. |
2410 | 364k | // |
2411 | 364k | if (ReplaceExitValue != NeverRepl && |
2412 | 364k | !isa<SCEVCouldNotCompute>(BackedgeTakenCount)) |
2413 | 141k | rewriteLoopExitValues(L, Rewriter); |
2414 | 364k | |
2415 | 364k | // Eliminate redundant IV cycles. |
2416 | 364k | NumElimIV += Rewriter.replaceCongruentIVs(L, DT, DeadInsts); |
2417 | 364k | |
2418 | 364k | // If we have a trip count expression, rewrite the loop's exit condition |
2419 | 364k | // using it. We can currently only handle loops with a single exit. |
2420 | 364k | if (!DisableLFTR && 364k canExpandBackedgeTakenCount(L, SE, Rewriter)364k && |
2421 | 364k | needsLFTR(L, DT)119k ) { |
2422 | 64.6k | PHINode *IndVar = FindLoopCounter(L, BackedgeTakenCount, SE, DT); |
2423 | 64.6k | if (IndVar64.6k ) { |
2424 | 56.7k | // Check preconditions for proper SCEVExpander operation. SCEV does not |
2425 | 56.7k | // express SCEVExpander's dependencies, such as LoopSimplify. Instead any |
2426 | 56.7k | // pass that uses the SCEVExpander must do it. This does not work well for |
2427 | 56.7k | // loop passes because SCEVExpander makes assumptions about all loops, |
2428 | 56.7k | // while LoopPassManager only forces the current loop to be simplified. |
2429 | 56.7k | // |
2430 | 56.7k | // FIXME: SCEV expansion has no way to bail out, so the caller must |
2431 | 56.7k | // explicitly check any assumptions made by SCEV. Brittle. |
2432 | 56.7k | const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(BackedgeTakenCount); |
2433 | 56.7k | if (!AR || 56.7k AR->getLoop()->getLoopPreheader()404 ) |
2434 | 56.7k | (void)linearFunctionTestReplace(L, BackedgeTakenCount, IndVar, |
2435 | 56.7k | Rewriter); |
2436 | 56.7k | } |
2437 | 64.6k | } |
2438 | 364k | // Clear the rewriter cache, because values that are in the rewriter's cache |
2439 | 364k | // can be deleted in the loop below, causing the AssertingVH in the cache to |
2440 | 364k | // trigger. |
2441 | 364k | Rewriter.clear(); |
2442 | 364k | |
2443 | 364k | // Now that we're done iterating through lists, clean up any instructions |
2444 | 364k | // which are now dead. |
2445 | 487k | while (!DeadInsts.empty()) |
2446 | 123k | if (Instruction *123k Inst123k = |
2447 | 123k | dyn_cast_or_null<Instruction>(DeadInsts.pop_back_val())) |
2448 | 123k | RecursivelyDeleteTriviallyDeadInstructions(Inst, TLI); |
2449 | 364k | |
2450 | 364k | // The Rewriter may not be used from this point on. |
2451 | 364k | |
2452 | 364k | // Loop-invariant instructions in the preheader that aren't used in the |
2453 | 364k | // loop may be sunk below the loop to reduce register pressure. |
2454 | 364k | sinkUnusedInvariants(L); |
2455 | 364k | |
2456 | 364k | // rewriteFirstIterationLoopExitValues does not rely on the computation of |
2457 | 364k | // trip count and therefore can further simplify exit values in addition to |
2458 | 364k | // rewriteLoopExitValues. |
2459 | 364k | rewriteFirstIterationLoopExitValues(L); |
2460 | 364k | |
2461 | 364k | // Clean up dead instructions. |
2462 | 364k | Changed |= DeleteDeadPHIs(L->getHeader(), TLI); |
2463 | 364k | |
2464 | 364k | // Check a post-condition. |
2465 | 364k | assert(L->isRecursivelyLCSSAForm(*DT, *LI) && |
2466 | 364k | "Indvars did not preserve LCSSA!"); |
2467 | 364k | |
2468 | 364k | // Verify that LFTR, and any other change have not interfered with SCEV's |
2469 | 364k | // ability to compute trip count. |
2470 | | #ifndef NDEBUG |
2471 | | if (VerifyIndvars && !isa<SCEVCouldNotCompute>(BackedgeTakenCount)) { |
2472 | | SE->forgetLoop(L); |
2473 | | const SCEV *NewBECount = SE->getBackedgeTakenCount(L); |
2474 | | if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) < |
2475 | | SE->getTypeSizeInBits(NewBECount->getType())) |
2476 | | NewBECount = SE->getTruncateOrNoop(NewBECount, |
2477 | | BackedgeTakenCount->getType()); |
2478 | | else |
2479 | | BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, |
2480 | | NewBECount->getType()); |
2481 | | assert(BackedgeTakenCount == NewBECount && "indvars must preserve SCEV"); |
2482 | | } |
2483 | | #endif |
2484 | | |
2485 | 364k | return Changed; |
2486 | 364k | } |
2487 | | |
2488 | | PreservedAnalyses IndVarSimplifyPass::run(Loop &L, LoopAnalysisManager &AM, |
2489 | | LoopStandardAnalysisResults &AR, |
2490 | 58 | LPMUpdater &) { |
2491 | 58 | Function *F = L.getHeader()->getParent(); |
2492 | 58 | const DataLayout &DL = F->getParent()->getDataLayout(); |
2493 | 58 | |
2494 | 58 | IndVarSimplify IVS(&AR.LI, &AR.SE, &AR.DT, DL, &AR.TLI, &AR.TTI); |
2495 | 58 | if (!IVS.run(&L)) |
2496 | 26 | return PreservedAnalyses::all(); |
2497 | 32 | |
2498 | 32 | auto PA = getLoopPassPreservedAnalyses(); |
2499 | 32 | PA.preserveSet<CFGAnalyses>(); |
2500 | 32 | return PA; |
2501 | 32 | } |
2502 | | |
2503 | | namespace { |
2504 | | struct IndVarSimplifyLegacyPass : public LoopPass { |
2505 | | static char ID; // Pass identification, replacement for typeid |
2506 | 17.6k | IndVarSimplifyLegacyPass() : LoopPass(ID) { |
2507 | 17.6k | initializeIndVarSimplifyLegacyPassPass(*PassRegistry::getPassRegistry()); |
2508 | 17.6k | } |
2509 | | |
2510 | 364k | bool runOnLoop(Loop *L, LPPassManager &LPM) override { |
2511 | 364k | if (skipLoop(L)) |
2512 | 15 | return false; |
2513 | 364k | |
2514 | 364k | auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); |
2515 | 364k | auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); |
2516 | 364k | auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); |
2517 | 364k | auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); |
2518 | 364k | auto *TLI = TLIP ? &TLIP->getTLI()364k : nullptr0 ; |
2519 | 364k | auto *TTIP = getAnalysisIfAvailable<TargetTransformInfoWrapperPass>(); |
2520 | 364k | auto *TTI = TTIP ? &TTIP->getTTI(*L->getHeader()->getParent())364k : nullptr0 ; |
2521 | 364k | const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); |
2522 | 364k | |
2523 | 364k | IndVarSimplify IVS(LI, SE, DT, DL, TLI, TTI); |
2524 | 364k | return IVS.run(L); |
2525 | 364k | } |
2526 | | |
2527 | 17.6k | void getAnalysisUsage(AnalysisUsage &AU) const override { |
2528 | 17.6k | AU.setPreservesCFG(); |
2529 | 17.6k | getLoopAnalysisUsage(AU); |
2530 | 17.6k | } |
2531 | | }; |
2532 | | } |
2533 | | |
2534 | | char IndVarSimplifyLegacyPass::ID = 0; |
2535 | 41.8k | INITIALIZE_PASS_BEGIN41.8k (IndVarSimplifyLegacyPass, "indvars",
|
2536 | 41.8k | "Induction Variable Simplification", false, false) |
2537 | 41.8k | INITIALIZE_PASS_DEPENDENCY(LoopPass) |
2538 | 41.8k | INITIALIZE_PASS_END(IndVarSimplifyLegacyPass, "indvars", |
2539 | | "Induction Variable Simplification", false, false) |
2540 | | |
2541 | 17.4k | Pass *llvm::createIndVarSimplifyPass() { |
2542 | 17.4k | return new IndVarSimplifyLegacyPass(); |
2543 | 17.4k | } |