/Users/buildslave/jenkins/sharedspace/clang-stage2-coverage-R@2/llvm/lib/Transforms/Utils/LoopUnrollPeel.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | //===-- UnrollLoopPeel.cpp - Loop peeling utilities -----------------------===// |
2 | | // |
3 | | // The LLVM Compiler Infrastructure |
4 | | // |
5 | | // This file is distributed under the University of Illinois Open Source |
6 | | // License. See LICENSE.TXT for details. |
7 | | // |
8 | | //===----------------------------------------------------------------------===// |
9 | | // |
10 | | // This file implements some loop unrolling utilities for peeling loops |
11 | | // with dynamically inferred (from PGO) trip counts. See LoopUnroll.cpp for |
12 | | // unrolling loops with compile-time constant trip counts. |
13 | | // |
14 | | //===----------------------------------------------------------------------===// |
15 | | |
16 | | #include "llvm/ADT/Statistic.h" |
17 | | #include "llvm/Analysis/LoopIterator.h" |
18 | | #include "llvm/Analysis/LoopPass.h" |
19 | | #include "llvm/Analysis/ScalarEvolution.h" |
20 | | #include "llvm/Analysis/TargetTransformInfo.h" |
21 | | #include "llvm/IR/BasicBlock.h" |
22 | | #include "llvm/IR/Dominators.h" |
23 | | #include "llvm/IR/MDBuilder.h" |
24 | | #include "llvm/IR/Metadata.h" |
25 | | #include "llvm/IR/Module.h" |
26 | | #include "llvm/Support/Debug.h" |
27 | | #include "llvm/Support/raw_ostream.h" |
28 | | #include "llvm/Transforms/Scalar.h" |
29 | | #include "llvm/Transforms/Utils/BasicBlockUtils.h" |
30 | | #include "llvm/Transforms/Utils/Cloning.h" |
31 | | #include "llvm/Transforms/Utils/LoopSimplify.h" |
32 | | #include "llvm/Transforms/Utils/LoopUtils.h" |
33 | | #include "llvm/Transforms/Utils/UnrollLoop.h" |
34 | | #include <algorithm> |
35 | | |
36 | | using namespace llvm; |
37 | | |
38 | | #define DEBUG_TYPE "loop-unroll" |
// Counts loops successfully peeled by peelLoop() below.
STATISTIC(NumPeeled, "Number of loops peeled");

// Upper bound on the profile-estimated average trip count (and on the
// Phi-invariance peel count) for which peeling is attempted.
static cl::opt<unsigned> UnrollPeelMaxCount(
    "unroll-peel-max-count", cl::init(7), cl::Hidden,
    cl::desc("Max average trip count which will cause loop peeling."));

// When set on the command line, overrides all heuristics and peels exactly
// this many iterations (subject to canPeel()).
static cl::opt<unsigned> UnrollForcePeelCount(
    "unroll-force-peel-count", cl::init(0), cl::Hidden,
    cl::desc("Force a peel count regardless of profiling information."));

// Designates that a Phi is estimated to become invariant after an "infinite"
// number of loop iterations (i.e. only may become an invariant if the loop is
// fully unrolled).
static const unsigned InfiniteIterationsToInvariance = UINT_MAX;
53 | | |
54 | | // Check whether we are capable of peeling this loop. |
55 | 634k | static bool canPeel(Loop *L) { |
56 | 634k | // Make sure the loop is in simplified form |
57 | 634k | if (!L->isLoopSimplifyForm()) |
58 | 0 | return false; |
59 | 634k | |
60 | 634k | // Only peel loops that contain a single exit |
61 | 634k | if (634k !L->getExitingBlock() || 634k !L->getUniqueExitBlock()469k ) |
62 | 166k | return false; |
63 | 467k | |
64 | 467k | // Don't try to peel loops where the latch is not the exiting block. |
65 | 467k | // This can be an indication of two different things: |
66 | 467k | // 1) The loop is not rotated. |
67 | 467k | // 2) The loop contains irreducible control flow that involves the latch. |
68 | 467k | if (467k L->getLoopLatch() != L->getExitingBlock()467k ) |
69 | 14.6k | return false; |
70 | 453k | |
71 | 453k | return true; |
72 | 453k | } |
73 | | |
74 | | // This function calculates the number of iterations after which the given Phi |
75 | | // becomes an invariant. The pre-calculated values are memorized in the map. The |
76 | | // function (shortcut is I) is calculated according to the following definition: |
77 | | // Given %x = phi <Inputs from above the loop>, ..., [%y, %back.edge]. |
78 | | // If %y is a loop invariant, then I(%x) = 1. |
79 | | // If %y is a Phi from the loop header, I(%x) = I(%y) + 1. |
80 | | // Otherwise, I(%x) is infinite. |
81 | | // TODO: Actually if %y is an expression that depends only on Phi %z and some |
82 | | // loop invariants, we can estimate I(%x) = I(%z) + 1. The example |
83 | | // looks like: |
84 | | // %x = phi(0, %a), <-- becomes invariant starting from 3rd iteration. |
85 | | // %y = phi(0, 5), |
86 | | // %a = %y + 1. |
87 | | static unsigned calculateIterationsToInvariance( |
88 | | PHINode *Phi, Loop *L, BasicBlock *BackEdge, |
89 | 477k | SmallDenseMap<PHINode *, unsigned> &IterationsToInvariance) { |
90 | 477k | assert(Phi->getParent() == L->getHeader() && |
91 | 477k | "Non-loop Phi should not be checked for turning into invariant."); |
92 | 477k | assert(BackEdge == L->getLoopLatch() && "Wrong latch?"); |
93 | 477k | // If we already know the answer, take it from the map. |
94 | 477k | auto I = IterationsToInvariance.find(Phi); |
95 | 477k | if (I != IterationsToInvariance.end()) |
96 | 649 | return I->second; |
97 | 477k | |
98 | 477k | // Otherwise we need to analyze the input from the back edge. |
99 | 477k | Value *Input = Phi->getIncomingValueForBlock(BackEdge); |
100 | 477k | // Place infinity to map to avoid infinite recursion for cycled Phis. Such |
101 | 477k | // cycles can never stop on an invariant. |
102 | 477k | IterationsToInvariance[Phi] = InfiniteIterationsToInvariance; |
103 | 477k | unsigned ToInvariance = InfiniteIterationsToInvariance; |
104 | 477k | |
105 | 477k | if (L->isLoopInvariant(Input)) |
106 | 372 | ToInvariance = 1u; |
107 | 476k | else if (PHINode *476k IncPhi476k = dyn_cast<PHINode>(Input)) { |
108 | 16.3k | // Only consider Phis in header block. |
109 | 16.3k | if (IncPhi->getParent() != L->getHeader()) |
110 | 15.6k | return InfiniteIterationsToInvariance; |
111 | 649 | // If the input becomes an invariant after X iterations, then our Phi |
112 | 649 | // becomes an invariant after X + 1 iterations. |
113 | 649 | unsigned InputToInvariance = calculateIterationsToInvariance( |
114 | 649 | IncPhi, L, BackEdge, IterationsToInvariance); |
115 | 649 | if (InputToInvariance != InfiniteIterationsToInvariance) |
116 | 5 | ToInvariance = InputToInvariance + 1u; |
117 | 476k | } |
118 | 477k | |
119 | 477k | // If we found that this Phi lies in an invariant chain, update the map. |
120 | 461k | if (461k ToInvariance != InfiniteIterationsToInvariance461k ) |
121 | 377 | IterationsToInvariance[Phi] = ToInvariance; |
122 | 461k | return ToInvariance; |
123 | 477k | } |
124 | | |
125 | | // Return the number of iterations we want to peel off. |
126 | | void llvm::computePeelCount(Loop *L, unsigned LoopSize, |
127 | | TargetTransformInfo::UnrollingPreferences &UP, |
128 | 634k | unsigned &TripCount) { |
129 | 634k | assert(LoopSize > 0 && "Zero loop size is not allowed!"); |
130 | 634k | UP.PeelCount = 0; |
131 | 634k | if (!canPeel(L)) |
132 | 181k | return; |
133 | 452k | |
134 | 452k | // Only try to peel innermost loops. |
135 | 452k | if (452k !L->empty()452k ) |
136 | 70.8k | return; |
137 | 381k | |
138 | 381k | // Here we try to get rid of Phis which become invariants after 1, 2, ..., N |
139 | 381k | // iterations of the loop. For this we compute the number for iterations after |
140 | 381k | // which every Phi is guaranteed to become an invariant, and try to peel the |
141 | 381k | // maximum number of iterations among these values, thus turning all those |
142 | 381k | // Phis into invariants. |
143 | 381k | // First, check that we can peel at least one iteration. |
144 | 381k | if (381k 2 * LoopSize <= UP.Threshold && 381k UnrollPeelMaxCount > 0379k ) { |
145 | 379k | // Store the pre-calculated values here. |
146 | 379k | SmallDenseMap<PHINode *, unsigned> IterationsToInvariance; |
147 | 379k | // Now go through all Phis to calculate their the number of iterations they |
148 | 379k | // need to become invariants. |
149 | 379k | unsigned DesiredPeelCount = 0; |
150 | 379k | BasicBlock *BackEdge = L->getLoopLatch(); |
151 | 379k | assert(BackEdge && "Loop is not in simplified form?"); |
152 | 857k | for (auto BI = L->getHeader()->begin(); isa<PHINode>(&*BI)857k ; ++BI477k ) { |
153 | 477k | PHINode *Phi = cast<PHINode>(&*BI); |
154 | 477k | unsigned ToInvariance = calculateIterationsToInvariance( |
155 | 477k | Phi, L, BackEdge, IterationsToInvariance); |
156 | 477k | if (ToInvariance != InfiniteIterationsToInvariance) |
157 | 377 | DesiredPeelCount = std::max(DesiredPeelCount, ToInvariance); |
158 | 477k | } |
159 | 379k | if (DesiredPeelCount > 0379k ) { |
160 | 330 | // Pay respect to limitations implied by loop size and the max peel count. |
161 | 330 | unsigned MaxPeelCount = UnrollPeelMaxCount; |
162 | 330 | MaxPeelCount = std::min(MaxPeelCount, UP.Threshold / LoopSize - 1); |
163 | 330 | DesiredPeelCount = std::min(DesiredPeelCount, MaxPeelCount); |
164 | 330 | // Consider max peel count limitation. |
165 | 330 | assert(DesiredPeelCount > 0 && "Wrong loop size estimation?"); |
166 | 330 | DEBUG(dbgs() << "Peel " << DesiredPeelCount << " iteration(s) to turn" |
167 | 330 | << " some Phis into invariants.\n"); |
168 | 330 | UP.PeelCount = DesiredPeelCount; |
169 | 330 | return; |
170 | 330 | } |
171 | 381k | } |
172 | 381k | |
173 | 381k | // Bail if we know the statically calculated trip count. |
174 | 381k | // In this case we rather prefer partial unrolling. |
175 | 381k | if (381k TripCount381k ) |
176 | 54.5k | return; |
177 | 327k | |
178 | 327k | // If the user provided a peel count, use that. |
179 | 327k | bool UserPeelCount = UnrollForcePeelCount.getNumOccurrences() > 0; |
180 | 327k | if (UserPeelCount327k ) { |
181 | 3 | DEBUG(dbgs() << "Force-peeling first " << UnrollForcePeelCount |
182 | 3 | << " iterations.\n"); |
183 | 3 | UP.PeelCount = UnrollForcePeelCount; |
184 | 3 | return; |
185 | 3 | } |
186 | 327k | |
187 | 327k | // If we don't know the trip count, but have reason to believe the average |
188 | 327k | // trip count is low, peeling should be beneficial, since we will usually |
189 | 327k | // hit the peeled section. |
190 | 327k | // We only do this in the presence of profile information, since otherwise |
191 | 327k | // our estimates of the trip count are not reliable enough. |
192 | 327k | if (327k UP.AllowPeeling && 327k L->getHeader()->getParent()->getEntryCount()162k ) { |
193 | 5 | Optional<unsigned> PeelCount = getLoopEstimatedTripCount(L); |
194 | 5 | if (!PeelCount) |
195 | 2 | return; |
196 | 3 | |
197 | 3 | DEBUG3 (dbgs() << "Profile-based estimated trip count is " << *PeelCount |
198 | 3 | << "\n"); |
199 | 3 | |
200 | 3 | if (*PeelCount3 ) { |
201 | 1 | if ((*PeelCount <= UnrollPeelMaxCount) && |
202 | 1 | (LoopSize * (*PeelCount + 1) <= UP.Threshold)0 ) { |
203 | 0 | DEBUG(dbgs() << "Peeling first " << *PeelCount << " iterations.\n"); |
204 | 0 | UP.PeelCount = *PeelCount; |
205 | 0 | return; |
206 | 0 | } |
207 | 1 | DEBUG1 (dbgs() << "Requested peel count: " << *PeelCount << "\n"); |
208 | 1 | DEBUG(dbgs() << "Max peel count: " << UnrollPeelMaxCount << "\n"); |
209 | 1 | DEBUG(dbgs() << "Peel cost: " << LoopSize * (*PeelCount + 1) << "\n"); |
210 | 1 | DEBUG(dbgs() << "Max peel cost: " << UP.Threshold << "\n"); |
211 | 1 | } |
212 | 5 | } |
213 | 327k | |
214 | 327k | return; |
215 | 634k | } |
216 | | |
217 | | /// \brief Update the branch weights of the latch of a peeled-off loop |
218 | | /// iteration. |
219 | | /// This sets the branch weights for the latch of the recently peeled off loop |
220 | | /// iteration correctly. |
221 | | /// Our goal is to make sure that: |
222 | | /// a) The total weight of all the copies of the loop body is preserved. |
223 | | /// b) The total weight of the loop exit is preserved. |
224 | | /// c) The body weight is reasonably distributed between the peeled iterations. |
225 | | /// |
226 | | /// \param Header The copy of the header block that belongs to next iteration. |
227 | | /// \param LatchBR The copy of the latch branch that belongs to this iteration. |
228 | | /// \param IterNumber The serial number of the iteration that was just |
229 | | /// peeled off. |
230 | | /// \param AvgIters The average number of iterations we expect the loop to have. |
231 | | /// \param[in,out] PeeledHeaderWeight The total number of dynamic loop |
232 | | /// iterations that are unaccounted for. As an input, it represents the number |
233 | | /// of times we expect to enter the header of the iteration currently being |
234 | | /// peeled off. The output is the number of times we expect to enter the |
235 | | /// header of the next iteration. |
236 | | static void updateBranchWeights(BasicBlock *Header, BranchInst *LatchBR, |
237 | | unsigned IterNumber, unsigned AvgIters, |
238 | 342 | uint64_t &PeeledHeaderWeight) { |
239 | 342 | |
240 | 342 | // FIXME: Pick a more realistic distribution. |
241 | 342 | // Currently the proportion of weight we assign to the fall-through |
242 | 342 | // side of the branch drops linearly with the iteration number, and we use |
243 | 342 | // a 0.9 fudge factor to make the drop-off less sharp... |
244 | 342 | if (PeeledHeaderWeight342 ) { |
245 | 0 | uint64_t FallThruWeight = |
246 | 0 | PeeledHeaderWeight * ((float)(AvgIters - IterNumber) / AvgIters * 0.9); |
247 | 0 | uint64_t ExitWeight = PeeledHeaderWeight - FallThruWeight; |
248 | 0 | PeeledHeaderWeight -= ExitWeight; |
249 | 0 |
|
250 | 0 | unsigned HeaderIdx = (LatchBR->getSuccessor(0) == Header ? 00 : 10 ); |
251 | 0 | MDBuilder MDB(LatchBR->getContext()); |
252 | 0 | MDNode *WeightNode = |
253 | 0 | HeaderIdx ? MDB.createBranchWeights(ExitWeight, FallThruWeight) |
254 | 0 | : MDB.createBranchWeights(FallThruWeight, ExitWeight); |
255 | 0 | LatchBR->setMetadata(LLVMContext::MD_prof, WeightNode); |
256 | 0 | } |
257 | 342 | } |
258 | | |
259 | | /// \brief Clones the body of the loop L, putting it between \p InsertTop and \p |
260 | | /// InsertBot. |
261 | | /// \param IterNumber The serial number of the iteration currently being |
262 | | /// peeled off. |
263 | | /// \param Exit The exit block of the original loop. |
264 | | /// \param[out] NewBlocks A list of the the blocks in the newly created clone |
265 | | /// \param[out] VMap The value map between the loop and the new clone. |
266 | | /// \param LoopBlocks A helper for DFS-traversal of the loop. |
267 | | /// \param LVMap A value-map that maps instructions from the original loop to |
268 | | /// instructions in the last peeled-off iteration. |
269 | | static void cloneLoopBlocks(Loop *L, unsigned IterNumber, BasicBlock *InsertTop, |
270 | | BasicBlock *InsertBot, BasicBlock *Exit, |
271 | | SmallVectorImpl<BasicBlock *> &NewBlocks, |
272 | | LoopBlocksDFS &LoopBlocks, ValueToValueMapTy &VMap, |
273 | | ValueToValueMapTy &LVMap, DominatorTree *DT, |
274 | 342 | LoopInfo *LI) { |
275 | 342 | |
276 | 342 | BasicBlock *Header = L->getHeader(); |
277 | 342 | BasicBlock *Latch = L->getLoopLatch(); |
278 | 342 | BasicBlock *PreHeader = L->getLoopPreheader(); |
279 | 342 | |
280 | 342 | Function *F = Header->getParent(); |
281 | 342 | LoopBlocksDFS::RPOIterator BlockBegin = LoopBlocks.beginRPO(); |
282 | 342 | LoopBlocksDFS::RPOIterator BlockEnd = LoopBlocks.endRPO(); |
283 | 342 | Loop *ParentLoop = L->getParentLoop(); |
284 | 342 | |
285 | 342 | // For each block in the original loop, create a new copy, |
286 | 342 | // and update the value map with the newly created values. |
287 | 1.34k | for (LoopBlocksDFS::RPOIterator BB = BlockBegin; BB != BlockEnd1.34k ; ++BB1.00k ) { |
288 | 1.00k | BasicBlock *NewBB = CloneBasicBlock(*BB, VMap, ".peel", F); |
289 | 1.00k | NewBlocks.push_back(NewBB); |
290 | 1.00k | |
291 | 1.00k | if (ParentLoop) |
292 | 270 | ParentLoop->addBasicBlockToLoop(NewBB, *LI); |
293 | 1.00k | |
294 | 1.00k | VMap[*BB] = NewBB; |
295 | 1.00k | |
296 | 1.00k | // If dominator tree is available, insert nodes to represent cloned blocks. |
297 | 1.00k | if (DT1.00k ) { |
298 | 1.00k | if (Header == *BB) |
299 | 342 | DT->addNewBlock(NewBB, InsertTop); |
300 | 662 | else { |
301 | 662 | DomTreeNode *IDom = DT->getNode(*BB)->getIDom(); |
302 | 662 | // VMap must contain entry for IDom, as the iteration order is RPO. |
303 | 662 | DT->addNewBlock(NewBB, cast<BasicBlock>(VMap[IDom->getBlock()])); |
304 | 662 | } |
305 | 1.00k | } |
306 | 1.00k | } |
307 | 342 | |
308 | 342 | // Hook-up the control flow for the newly inserted blocks. |
309 | 342 | // The new header is hooked up directly to the "top", which is either |
310 | 342 | // the original loop preheader (for the first iteration) or the previous |
311 | 342 | // iteration's exiting block (for every other iteration) |
312 | 342 | InsertTop->getTerminator()->setSuccessor(0, cast<BasicBlock>(VMap[Header])); |
313 | 342 | |
314 | 342 | // Similarly, for the latch: |
315 | 342 | // The original exiting edge is still hooked up to the loop exit. |
316 | 342 | // The backedge now goes to the "bottom", which is either the loop's real |
317 | 342 | // header (for the last peeled iteration) or the copied header of the next |
318 | 342 | // iteration (for every other iteration) |
319 | 342 | BasicBlock *NewLatch = cast<BasicBlock>(VMap[Latch]); |
320 | 342 | BranchInst *LatchBR = cast<BranchInst>(NewLatch->getTerminator()); |
321 | 342 | unsigned HeaderIdx = (LatchBR->getSuccessor(0) == Header ? 0263 : 179 ); |
322 | 342 | LatchBR->setSuccessor(HeaderIdx, InsertBot); |
323 | 342 | LatchBR->setSuccessor(1 - HeaderIdx, Exit); |
324 | 342 | if (DT) |
325 | 342 | DT->changeImmediateDominator(InsertBot, NewLatch); |
326 | 342 | |
327 | 342 | // The new copy of the loop body starts with a bunch of PHI nodes |
328 | 342 | // that pick an incoming value from either the preheader, or the previous |
329 | 342 | // loop iteration. Since this copy is no longer part of the loop, we |
330 | 342 | // resolve this statically: |
331 | 342 | // For the first iteration, we use the value from the preheader directly. |
332 | 342 | // For any other iteration, we replace the phi with the value generated by |
333 | 342 | // the immediately preceding clone of the loop body (which represents |
334 | 342 | // the previous iteration). |
335 | 1.26k | for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I)1.26k ; ++I921 ) { |
336 | 921 | PHINode *NewPHI = cast<PHINode>(VMap[&*I]); |
337 | 921 | if (IterNumber == 0921 ) { |
338 | 894 | VMap[&*I] = NewPHI->getIncomingValueForBlock(PreHeader); |
339 | 921 | } else { |
340 | 27 | Value *LatchVal = NewPHI->getIncomingValueForBlock(Latch); |
341 | 27 | Instruction *LatchInst = dyn_cast<Instruction>(LatchVal); |
342 | 27 | if (LatchInst && 27 L->contains(LatchInst)23 ) |
343 | 23 | VMap[&*I] = LVMap[LatchInst]; |
344 | 27 | else |
345 | 4 | VMap[&*I] = LatchVal; |
346 | 27 | } |
347 | 921 | cast<BasicBlock>(VMap[Header])->getInstList().erase(NewPHI); |
348 | 921 | } |
349 | 342 | |
350 | 342 | // Fix up the outgoing values - we need to add a value for the iteration |
351 | 342 | // we've just created. Note that this must happen *after* the incoming |
352 | 342 | // values are adjusted, since the value going out of the latch may also be |
353 | 342 | // a value coming into the header. |
354 | 451 | for (BasicBlock::iterator I = Exit->begin(); isa<PHINode>(I)451 ; ++I109 ) { |
355 | 109 | PHINode *PHI = cast<PHINode>(I); |
356 | 109 | Value *LatchVal = PHI->getIncomingValueForBlock(Latch); |
357 | 109 | Instruction *LatchInst = dyn_cast<Instruction>(LatchVal); |
358 | 109 | if (LatchInst && 109 L->contains(LatchInst)109 ) |
359 | 109 | LatchVal = VMap[LatchVal]; |
360 | 109 | PHI->addIncoming(LatchVal, cast<BasicBlock>(VMap[Latch])); |
361 | 109 | } |
362 | 342 | |
363 | 342 | // LastValueMap is updated with the values for the current loop |
364 | 342 | // which are used the next time this function is called. |
365 | 342 | for (const auto &KV : VMap) |
366 | 8.77k | LVMap[KV.first] = KV.second; |
367 | 342 | } |
368 | | |
369 | | /// \brief Peel off the first \p PeelCount iterations of loop \p L. |
370 | | /// |
371 | | /// Note that this does not peel them off as a single straight-line block. |
372 | | /// Rather, each iteration is peeled off separately, and needs to check the |
373 | | /// exit condition. |
374 | | /// For loops that dynamically execute \p PeelCount iterations or less |
375 | | /// this provides a benefit, since the peeled off iterations, which account |
376 | | /// for the bulk of dynamic execution, can be further simplified by scalar |
377 | | /// optimizations. |
378 | | bool llvm::peelLoop(Loop *L, unsigned PeelCount, LoopInfo *LI, |
379 | | ScalarEvolution *SE, DominatorTree *DT, |
380 | 334 | AssumptionCache *AC, bool PreserveLCSSA) { |
381 | 334 | if (!canPeel(L)) |
382 | 0 | return false; |
383 | 334 | |
384 | 334 | LoopBlocksDFS LoopBlocks(L); |
385 | 334 | LoopBlocks.perform(LI); |
386 | 334 | |
387 | 334 | BasicBlock *Header = L->getHeader(); |
388 | 334 | BasicBlock *PreHeader = L->getLoopPreheader(); |
389 | 334 | BasicBlock *Latch = L->getLoopLatch(); |
390 | 334 | BasicBlock *Exit = L->getUniqueExitBlock(); |
391 | 334 | |
392 | 334 | Function *F = Header->getParent(); |
393 | 334 | |
394 | 334 | // Set up all the necessary basic blocks. It is convenient to split the |
395 | 334 | // preheader into 3 parts - two blocks to anchor the peeled copy of the loop |
396 | 334 | // body, and a new preheader for the "real" loop. |
397 | 334 | |
398 | 334 | // Peeling the first iteration transforms. |
399 | 334 | // |
400 | 334 | // PreHeader: |
401 | 334 | // ... |
402 | 334 | // Header: |
403 | 334 | // LoopBody |
404 | 334 | // If (cond) goto Header |
405 | 334 | // Exit: |
406 | 334 | // |
407 | 334 | // into |
408 | 334 | // |
409 | 334 | // InsertTop: |
410 | 334 | // LoopBody |
411 | 334 | // If (!cond) goto Exit |
412 | 334 | // InsertBot: |
413 | 334 | // NewPreHeader: |
414 | 334 | // ... |
415 | 334 | // Header: |
416 | 334 | // LoopBody |
417 | 334 | // If (cond) goto Header |
418 | 334 | // Exit: |
419 | 334 | // |
420 | 334 | // Each following iteration will split the current bottom anchor in two, |
421 | 334 | // and put the new copy of the loop body between these two blocks. That is, |
422 | 334 | // after peeling another iteration from the example above, we'll split |
423 | 334 | // InsertBot, and get: |
424 | 334 | // |
425 | 334 | // InsertTop: |
426 | 334 | // LoopBody |
427 | 334 | // If (!cond) goto Exit |
428 | 334 | // InsertBot: |
429 | 334 | // LoopBody |
430 | 334 | // If (!cond) goto Exit |
431 | 334 | // InsertBot.next: |
432 | 334 | // NewPreHeader: |
433 | 334 | // ... |
434 | 334 | // Header: |
435 | 334 | // LoopBody |
436 | 334 | // If (cond) goto Header |
437 | 334 | // Exit: |
438 | 334 | |
439 | 334 | BasicBlock *InsertTop = SplitEdge(PreHeader, Header, DT, LI); |
440 | 334 | BasicBlock *InsertBot = |
441 | 334 | SplitBlock(InsertTop, InsertTop->getTerminator(), DT, LI); |
442 | 334 | BasicBlock *NewPreHeader = |
443 | 334 | SplitBlock(InsertBot, InsertBot->getTerminator(), DT, LI); |
444 | 334 | |
445 | 334 | InsertTop->setName(Header->getName() + ".peel.begin"); |
446 | 334 | InsertBot->setName(Header->getName() + ".peel.next"); |
447 | 334 | NewPreHeader->setName(PreHeader->getName() + ".peel.newph"); |
448 | 334 | |
449 | 334 | ValueToValueMapTy LVMap; |
450 | 334 | |
451 | 334 | // If we have branch weight information, we'll want to update it for the |
452 | 334 | // newly created branches. |
453 | 334 | BranchInst *LatchBR = |
454 | 334 | cast<BranchInst>(cast<BasicBlock>(Latch)->getTerminator()); |
455 | 334 | unsigned HeaderIdx = (LatchBR->getSuccessor(0) == Header ? 0255 : 179 ); |
456 | 334 | |
457 | 334 | uint64_t TrueWeight, FalseWeight; |
458 | 334 | uint64_t ExitWeight = 0, CurHeaderWeight = 0; |
459 | 334 | if (LatchBR->extractProfMetadata(TrueWeight, FalseWeight)334 ) { |
460 | 0 | ExitWeight = HeaderIdx ? TrueWeight0 : FalseWeight0 ; |
461 | 0 | // The # of times the loop body executes is the sum of the exit block |
462 | 0 | // weight and the # of times the backedges are taken. |
463 | 0 | CurHeaderWeight = TrueWeight + FalseWeight; |
464 | 0 | } |
465 | 334 | |
466 | 334 | // For each peeled-off iteration, make a copy of the loop. |
467 | 676 | for (unsigned Iter = 0; Iter < PeelCount676 ; ++Iter342 ) { |
468 | 342 | SmallVector<BasicBlock *, 8> NewBlocks; |
469 | 342 | ValueToValueMapTy VMap; |
470 | 342 | |
471 | 342 | // Subtract the exit weight from the current header weight -- the exit |
472 | 342 | // weight is exactly the weight of the previous iteration's header. |
473 | 342 | // FIXME: due to the way the distribution is constructed, we need a |
474 | 342 | // guard here to make sure we don't end up with non-positive weights. |
475 | 342 | if (ExitWeight < CurHeaderWeight) |
476 | 8 | CurHeaderWeight -= ExitWeight; |
477 | 342 | else |
478 | 334 | CurHeaderWeight = 1; |
479 | 342 | |
480 | 342 | cloneLoopBlocks(L, Iter, InsertTop, InsertBot, Exit, |
481 | 342 | NewBlocks, LoopBlocks, VMap, LVMap, DT, LI); |
482 | 342 | |
483 | 342 | // Remap to use values from the current iteration instead of the |
484 | 342 | // previous one. |
485 | 342 | remapInstructionsInBlocks(NewBlocks, VMap); |
486 | 342 | |
487 | 342 | if (DT342 ) { |
488 | 342 | // Latches of the cloned loops dominate over the loop exit, so idom of the |
489 | 342 | // latter is the first cloned loop body, as original PreHeader dominates |
490 | 342 | // the original loop body. |
491 | 342 | if (Iter == 0) |
492 | 334 | DT->changeImmediateDominator(Exit, cast<BasicBlock>(LVMap[Latch])); |
493 | | #ifndef NDEBUG |
494 | | if (VerifyDomInfo) |
495 | | DT->verifyDomTree(); |
496 | | #endif |
497 | | } |
498 | 342 | |
499 | 342 | updateBranchWeights(InsertBot, cast<BranchInst>(VMap[LatchBR]), Iter, |
500 | 342 | PeelCount, ExitWeight); |
501 | 342 | |
502 | 342 | InsertTop = InsertBot; |
503 | 342 | InsertBot = SplitBlock(InsertBot, InsertBot->getTerminator(), DT, LI); |
504 | 342 | InsertBot->setName(Header->getName() + ".peel.next"); |
505 | 342 | |
506 | 342 | F->getBasicBlockList().splice(InsertTop->getIterator(), |
507 | 342 | F->getBasicBlockList(), |
508 | 342 | NewBlocks[0]->getIterator(), F->end()); |
509 | 342 | } |
510 | 334 | |
511 | 334 | // Now adjust the phi nodes in the loop header to get their initial values |
512 | 334 | // from the last peeled-off iteration instead of the preheader. |
513 | 1.22k | for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I)1.22k ; ++I894 ) { |
514 | 894 | PHINode *PHI = cast<PHINode>(I); |
515 | 894 | Value *NewVal = PHI->getIncomingValueForBlock(Latch); |
516 | 894 | Instruction *LatchInst = dyn_cast<Instruction>(NewVal); |
517 | 894 | if (LatchInst && 894 L->contains(LatchInst)805 ) |
518 | 521 | NewVal = LVMap[LatchInst]; |
519 | 894 | |
520 | 894 | PHI->setIncomingValue(PHI->getBasicBlockIndex(NewPreHeader), NewVal); |
521 | 894 | } |
522 | 334 | |
523 | 334 | // Adjust the branch weights on the loop exit. |
524 | 334 | if (ExitWeight334 ) { |
525 | 0 | // The backedge count is the difference of current header weight and |
526 | 0 | // current loop exit weight. If the current header weight is smaller than |
527 | 0 | // the current loop exit weight, we mark the loop backedge weight as 1. |
528 | 0 | uint64_t BackEdgeWeight = 0; |
529 | 0 | if (ExitWeight < CurHeaderWeight) |
530 | 0 | BackEdgeWeight = CurHeaderWeight - ExitWeight; |
531 | 0 | else |
532 | 0 | BackEdgeWeight = 1; |
533 | 0 | MDBuilder MDB(LatchBR->getContext()); |
534 | 0 | MDNode *WeightNode = |
535 | 0 | HeaderIdx ? MDB.createBranchWeights(ExitWeight, BackEdgeWeight) |
536 | 0 | : MDB.createBranchWeights(BackEdgeWeight, ExitWeight); |
537 | 0 | LatchBR->setMetadata(LLVMContext::MD_prof, WeightNode); |
538 | 0 | } |
539 | 334 | |
540 | 334 | // If the loop is nested, we changed the parent loop, update SE. |
541 | 334 | if (Loop *ParentLoop334 = L->getParentLoop()) { |
542 | 70 | SE->forgetLoop(ParentLoop); |
543 | 70 | |
544 | 70 | // FIXME: Incrementally update loop-simplify |
545 | 70 | simplifyLoop(ParentLoop, DT, LI, SE, AC, PreserveLCSSA); |
546 | 334 | } else { |
547 | 264 | // FIXME: Incrementally update loop-simplify |
548 | 264 | simplifyLoop(L, DT, LI, SE, AC, PreserveLCSSA); |
549 | 264 | } |
550 | 334 | |
551 | 334 | NumPeeled++; |
552 | 334 | |
553 | 334 | return true; |
554 | 334 | } |