/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/tools/polly/lib/CodeGen/BlockGenerators.cpp
Line | Count | Source |
1 | | //===--- BlockGenerators.cpp - Generate code for statements -----*- C++ -*-===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | // |
9 | | // This file implements the BlockGenerator and VectorBlockGenerator classes, |
10 | | // which generate sequential code and vectorized code for a polyhedral |
11 | | // statement, respectively. |
12 | | // |
13 | | //===----------------------------------------------------------------------===// |
14 | | |
15 | | #include "polly/CodeGen/BlockGenerators.h" |
16 | | #include "polly/CodeGen/IslExprBuilder.h" |
17 | | #include "polly/CodeGen/RuntimeDebugBuilder.h" |
18 | | #include "polly/Options.h" |
19 | | #include "polly/ScopInfo.h" |
20 | | #include "polly/Support/ScopHelper.h" |
21 | | #include "polly/Support/VirtualInstruction.h" |
22 | | #include "llvm/Analysis/LoopInfo.h" |
23 | | #include "llvm/Analysis/RegionInfo.h" |
24 | | #include "llvm/Analysis/ScalarEvolution.h" |
25 | | #include "llvm/Transforms/Utils/BasicBlockUtils.h" |
26 | | #include "llvm/Transforms/Utils/Local.h" |
27 | | #include "isl/ast.h" |
28 | | #include <deque> |
29 | | |
30 | | using namespace llvm; |
31 | | using namespace polly; |
32 | | |
33 | | static cl::opt<bool> Aligned("enable-polly-aligned", |
34 | | cl::desc("Assumed aligned memory accesses."), |
35 | | cl::Hidden, cl::init(false), cl::ZeroOrMore, |
36 | | cl::cat(PollyCategory)); |
37 | | |
38 | | bool PollyDebugPrinting; |
39 | | static cl::opt<bool, true> DebugPrintingX( |
40 | | "polly-codegen-add-debug-printing", |
41 | | cl::desc("Add printf calls that show the values loaded/stored."), |
42 | | cl::location(PollyDebugPrinting), cl::Hidden, cl::init(false), |
43 | | cl::ZeroOrMore, cl::cat(PollyCategory)); |
44 | | |
45 | | static cl::opt<bool> TraceStmts( |
46 | | "polly-codegen-trace-stmts", |
47 | | cl::desc("Add printf calls that print the statement being executed"), |
48 | | cl::Hidden, cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory)); |
49 | | |
50 | | static cl::opt<bool> TraceScalars( |
51 | | "polly-codegen-trace-scalars", |
52 | | cl::desc("Add printf calls that print the values of all scalar values " |
53 | | "used in a statement. Requires -polly-codegen-trace-stmts."), |
54 | | cl::Hidden, cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory)); |
55 | | |
56 | | BlockGenerator::BlockGenerator( |
57 | | PollyIRBuilder &B, LoopInfo &LI, ScalarEvolution &SE, DominatorTree &DT, |
58 | | AllocaMapTy &ScalarMap, EscapeUsersAllocaMapTy &EscapeMap, |
59 | | ValueMapT &GlobalMap, IslExprBuilder *ExprBuilder, BasicBlock *StartBlock) |
60 | | : Builder(B), LI(LI), SE(SE), ExprBuilder(ExprBuilder), DT(DT), |
61 | | EntryBB(nullptr), ScalarMap(ScalarMap), EscapeMap(EscapeMap), |
62 | 305 | GlobalMap(GlobalMap), StartBlock(StartBlock) {} |
63 | | |
64 | | Value *BlockGenerator::trySynthesizeNewValue(ScopStmt &Stmt, Value *Old, |
65 | | ValueMapT &BBMap, |
66 | | LoopToScevMapT &LTS,
67 | 700 | Loop *L) const { |
68 | 700 | if (!SE.isSCEVable(Old->getType())) |
69 | 0 | return nullptr; |
70 | 700 | |
71 | 700 | const SCEV *Scev = SE.getSCEVAtScope(Old, L); |
72 | 700 | if (!Scev) |
73 | 0 | return nullptr; |
74 | 700 | |
75 | 700 | if (isa<SCEVCouldNotCompute>(Scev)) |
76 | 0 | return nullptr; |
77 | 700 | |
78 | 700 | const SCEV *NewScev = SCEVLoopAddRecRewriter::rewrite(Scev, LTS, SE); |
79 | 700 | ValueMapT VTV; |
80 | 700 | VTV.insert(BBMap.begin(), BBMap.end()); |
81 | 700 | VTV.insert(GlobalMap.begin(), GlobalMap.end()); |
82 | 700 | |
83 | 700 | Scop &S = *Stmt.getParent(); |
84 | 700 | const DataLayout &DL = S.getFunction().getParent()->getDataLayout(); |
85 | 700 | auto IP = Builder.GetInsertPoint(); |
86 | 700 | |
87 | 700 | assert(IP != Builder.GetInsertBlock()->end() && |
88 | 700 | "Only instructions can be insert points for SCEVExpander"); |
89 | 700 | Value *Expanded = |
90 | 700 | expandCodeFor(S, SE, DL, "polly", NewScev, Old->getType(), &*IP, &VTV, |
91 | 700 | StartBlock->getSinglePredecessor()); |
92 | 700 | |
93 | 700 | BBMap[Old] = Expanded; |
94 | 700 | return Expanded; |
95 | 700 | } |
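 | | // Editorial sketch of the mechanism above (names hypothetical): for a value
 | | // %i with SCEV {0,+,1}<%for.body>, LTS maps %for.body to the SCEV of the
 | | // isl-generated induction variable; SCEVLoopAddRecRewriter evaluates the
 | | // add-rec at that expression, and expandCodeFor emits IR for the result,
 | | // reusing already-copied values through the combined VTV map.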
96 | | |
97 | | Value *BlockGenerator::getNewValue(ScopStmt &Stmt, Value *Old, ValueMapT &BBMap, |
98 | 2.27k | LoopToScevMapT &LTS, Loop *L) const {
99 | 2.27k | |
100 | 2.27k | auto lookupGlobally = [this](Value *Old) -> Value * { |
101 | 1.26k | Value *New = GlobalMap.lookup(Old); |
102 | 1.26k | if (!New) |
103 | 1.20k | return nullptr; |
104 | 55 | |
105 | 55 | // Required by: |
106 | 55 | // * Isl/CodeGen/OpenMP/invariant_base_pointer_preloaded.ll |
107 | 55 | // * Isl/CodeGen/OpenMP/invariant_base_pointer_preloaded_different_bb.ll |
108 | 55 | // * Isl/CodeGen/OpenMP/invariant_base_pointer_preloaded_pass_only_needed.ll |
109 | 55 | // * Isl/CodeGen/OpenMP/invariant_base_pointers_preloaded.ll |
110 | 55 | // * Isl/CodeGen/OpenMP/loop-body-references-outer-values-3.ll |
111 | 55 | // * Isl/CodeGen/OpenMP/single_loop_with_loop_invariant_baseptr.ll |
112 | 55 | // GlobalMap should be a mapping from (value in original SCoP) to (copied |
113 | 55 | // value in generated SCoP), without intermediate mappings, which might |
114 | 55 | // easily require transitiveness as well. |
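 | | // Illustration (hypothetical values): if GlobalMap contains both
 | | // %base -> %base.preload and %base.preload -> %base.subfn.copy, a single
 | | // lookup of %base yields %base.preload; the extra lookup below resolves it
 | | // to %base.subfn.copy, i.e. one level of transitivity.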
115 | 55 | if (Value *NewRemapped = GlobalMap.lookup(New)) |
116 | 6 | New = NewRemapped; |
117 | 55 | |
118 | 55 | // No test case for this code. |
119 | 55 | if (Old->getType()->getScalarSizeInBits() < |
120 | 55 | New->getType()->getScalarSizeInBits()) |
121 | 0 | New = Builder.CreateTruncOrBitCast(New, Old->getType()); |
122 | 55 | |
123 | 55 | return New; |
124 | 55 | }; |
125 | 2.27k | |
126 | 2.27k | Value *New = nullptr; |
127 | 2.27k | auto VUse = VirtualUse::create(&Stmt, L, Old, true); |
128 | 2.27k | switch (VUse.getKind()) { |
129 | 2.27k | case VirtualUse::Block: |
130 | 147 | // BasicBlocks are constants, but the BlockGenerator copies them.
131 | 147 | New = BBMap.lookup(Old); |
132 | 147 | break; |
133 | 2.27k | |
134 | 2.27k | case VirtualUse::Constant: |
135 | 415 | // Used by: |
136 | 415 | // * Isl/CodeGen/OpenMP/reference-argument-from-non-affine-region.ll |
137 | 415 | // Constants should not be redefined. In this case, the GlobalMap just |
138 | 415 | // contains a mapping to the same constant, which is unnecessary, but |
139 | 415 | // harmless. |
140 | 415 | if ((New = lookupGlobally(Old))) |
141 | 3 | break; |
142 | 412 | |
143 | 412 | assert(!BBMap.count(Old)); |
144 | 412 | New = Old; |
145 | 412 | break; |
146 | 412 | |
147 | 412 | case VirtualUse::ReadOnly: |
148 | 25 | assert(!GlobalMap.count(Old)); |
149 | 25 | |
150 | 25 | // Required for: |
151 | 25 | // * Isl/CodeGen/MemAccess/create_arrays.ll |
152 | 25 | // * Isl/CodeGen/read-only-scalars.ll |
153 | 25 | // * ScheduleOptimizer/pattern-matching-based-opts_10.ll |
154 | 25 | // For some reason these reload a read-only value. The reloaded value ends |
155 | 25 | // up in BBMap, but its value should be identical.
156 | 25 | // |
157 | 25 | // Required for: |
158 | 25 | // * Isl/CodeGen/OpenMP/single_loop_with_param.ll |
159 | 25 | // The parallel subfunctions need to reference the read-only value from the |
160 | 25 | // parent function, this is done by reloading them locally. |
161 | 25 | if ((New = BBMap.lookup(Old))) |
162 | 24 | break; |
163 | 1 | |
164 | 1 | New = Old; |
165 | 1 | break; |
166 | 1 | |
167 | 800 | case VirtualUse::Synthesizable: |
168 | 800 | // Used by: |
169 | 800 | // * Isl/CodeGen/OpenMP/loop-body-references-outer-values-3.ll |
170 | 800 | // * Isl/CodeGen/OpenMP/recomputed-srem.ll |
171 | 800 | // * Isl/CodeGen/OpenMP/reference-other-bb.ll |
172 | 800 | // * Isl/CodeGen/OpenMP/two-parallel-loops-reference-outer-indvar.ll |
173 | 800 | // For some reason synthesizable values end up in GlobalMap. Their values |
174 | 800 | // are the same as trySynthesizeNewValue would return. The legacy |
175 | 800 | // implementation prioritized GlobalMap, so this is what we do here as well. |
176 | 800 | // Ideally, synthesizable values should not end up in GlobalMap. |
177 | 800 | if ((New = lookupGlobally(Old))) |
178 | 7 | break; |
179 | 793 | |
180 | 793 | // Required for: |
181 | 793 | // * Isl/CodeGen/RuntimeDebugBuilder/combine_different_values.ll |
182 | 793 | // * Isl/CodeGen/getNumberOfIterations.ll |
183 | 793 | // * Isl/CodeGen/non_affine_float_compare.ll |
184 | 793 | // * ScheduleOptimizer/pattern-matching-based-opts_10.ll |
185 | 793 | // Ideally, synthesizable values are synthesized by trySynthesizeNewValue, |
186 | 793 | // not precomputed (SCEVExpander has its own caching mechanism). |
187 | 793 | // These tests fail without this, but I think trySynthesizeNewValue would |
188 | 793 | // just re-synthesize the same instructions. |
189 | 793 | if ((New = BBMap.lookup(Old))) |
190 | 93 | break; |
191 | 700 | |
192 | 700 | New = trySynthesizeNewValue(Stmt, Old, BBMap, LTS, L); |
193 | 700 | break; |
194 | 700 | |
195 | 700 | case VirtualUse::Hoisted: |
196 | 45 | // TODO: Hoisted invariant loads should be found in GlobalMap only, but not |
197 | 45 | // redefined locally (which will be ignored anyway). That is, the following |
198 | 45 | // assertion should apply: assert(!BBMap.count(Old)) |
199 | 45 | |
200 | 45 | New = lookupGlobally(Old); |
201 | 45 | break; |
202 | 700 | |
203 | 841 | case VirtualUse::Intra: |
204 | 841 | case VirtualUse::Inter: |
205 | 841 | assert(!GlobalMap.count(Old) && |
206 | 841 | "Intra and inter-stmt values are never global"); |
207 | 841 | New = BBMap.lookup(Old); |
208 | 841 | break; |
209 | 2.27k | } |
210 | 2.27k | assert(New && "Unexpected scalar dependence in region!"); |
211 | 2.27k | return New; |
212 | 2.27k | } |
213 | | |
214 | | void BlockGenerator::copyInstScalar(ScopStmt &Stmt, Instruction *Inst, |
215 | 567 | ValueMapT &BBMap, LoopToScevMapT &LTS) {
216 | 567 | // We do not generate debug intrinsics, as we did not investigate how to
217 | 567 | // copy them correctly. In the current state, they just crash the code
218 | 567 | // generation, as the metadata operands are not correctly copied.
219 | 567 | if (isa<DbgInfoIntrinsic>(Inst)) |
220 | 0 | return; |
221 | 567 | |
222 | 567 | Instruction *NewInst = Inst->clone(); |
223 | 567 | |
224 | 567 | // Replace old operands with the new ones. |
225 | 1.06k | for (Value *OldOperand : Inst->operands()) { |
226 | 1.06k | Value *NewOperand = |
227 | 1.06k | getNewValue(Stmt, OldOperand, BBMap, LTS, getLoopForStmt(Stmt)); |
228 | 1.06k | |
229 | 1.06k | if (!NewOperand) { |
230 | 0 | assert(!isa<StoreInst>(NewInst) && |
231 | 0 | "Store instructions are always needed!"); |
232 | 0 | NewInst->deleteValue(); |
233 | 0 | return; |
234 | 0 | } |
235 | 1.06k | |
236 | 1.06k | NewInst->replaceUsesOfWith(OldOperand, NewOperand); |
237 | 1.06k | } |
238 | 567 | |
239 | 567 | Builder.Insert(NewInst); |
240 | 567 | BBMap[Inst] = NewInst; |
241 | 567 | |
242 | 567 | // When copying the instruction onto the Module meant for the GPU, |
243 | 567 | // debug metadata attached to an instruction causes all related |
244 | 567 | // metadata to be pulled into the Module. This includes the DICompileUnit, |
245 | 567 | // which will not be listed in llvm.dbg.cu of the Module since the Module |
246 | 567 | // doesn't contain one. This fails the verification of the Module and the |
247 | 567 | // subsequent generation of the ASM string. |
248 | 567 | if (NewInst->getModule() != Inst->getModule()) |
249 | 0 | NewInst->setDebugLoc(llvm::DebugLoc()); |
250 | 567 | |
251 | 567 | if (!NewInst->getType()->isVoidTy()) |
252 | 458 | NewInst->setName("p_" + Inst->getName()); |
253 | 567 | } |
254 | | |
255 | | Value * |
256 | | BlockGenerator::generateLocationAccessed(ScopStmt &Stmt, MemAccInst Inst, |
257 | | ValueMapT &BBMap, LoopToScevMapT &LTS,
258 | 818 | isl_id_to_ast_expr *NewAccesses) { |
259 | 818 | const MemoryAccess &MA = Stmt.getArrayAccessFor(Inst); |
260 | 818 | return generateLocationAccessed( |
261 | 818 | Stmt, getLoopForStmt(Stmt), |
262 | 818 | Inst.isNull() ? nullptr : Inst.getPointerOperand(), BBMap, LTS,
263 | 818 | NewAccesses, MA.getId().release(), MA.getAccessValue()->getType()); |
264 | 818 | } |
265 | | |
266 | | Value *BlockGenerator::generateLocationAccessed( |
267 | | ScopStmt &Stmt, Loop *L, Value *Pointer, ValueMapT &BBMap, |
268 | | LoopToScevMapT &LTS, isl_id_to_ast_expr *NewAccesses, __isl_take isl_id *Id,
269 | 848 | Type *ExpectedType) { |
270 | 848 | isl_ast_expr *AccessExpr = isl_id_to_ast_expr_get(NewAccesses, Id); |
271 | 848 | |
272 | 848 | if (AccessExpr) { |
273 | 243 | AccessExpr = isl_ast_expr_address_of(AccessExpr); |
274 | 243 | auto Address = ExprBuilder->create(AccessExpr); |
275 | 243 | |
276 | 243 | // Cast the address of this memory access to a pointer type that has the |
277 | 243 | // same element type as the original access, but uses the address space of |
278 | 243 | // the newly generated pointer. |
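 | | // Illustration (hypothetical types): if the original access loaded a float
 | | // through a float* in addrspace(0) but the isl-generated address is an
 | | // i8 addrspace(1)*, the code below forms float addrspace(1)* so the element
 | | // type of the original access is kept.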
279 | 243 | auto OldPtrTy = ExpectedType->getPointerTo(); |
280 | 243 | auto NewPtrTy = Address->getType(); |
281 | 243 | OldPtrTy = PointerType::get(OldPtrTy->getElementType(), |
282 | 243 | NewPtrTy->getPointerAddressSpace()); |
283 | 243 | |
284 | 243 | if (OldPtrTy != NewPtrTy) |
285 | 4 | Address = Builder.CreateBitOrPointerCast(Address, OldPtrTy); |
286 | 243 | return Address; |
287 | 243 | } |
288 | 605 | assert( |
289 | 605 | Pointer && |
290 | 605 | "If expression was not generated, must use the original pointer value"); |
291 | 605 | return getNewValue(Stmt, Pointer, BBMap, LTS, L); |
292 | 605 | } |
293 | | |
294 | | Value * |
295 | | BlockGenerator::getImplicitAddress(MemoryAccess &Access, Loop *L, |
296 | | LoopToScevMapT &LTS, ValueMapT &BBMap,
297 | 246 | __isl_keep isl_id_to_ast_expr *NewAccesses) { |
298 | 246 | if (Access.isLatestArrayKind()) |
299 | 30 | return generateLocationAccessed(*Access.getStatement(), L, nullptr, BBMap, |
300 | 30 | LTS, NewAccesses, Access.getId().release(), |
301 | 30 | Access.getAccessValue()->getType()); |
302 | 216 | |
303 | 216 | return getOrCreateAlloca(Access); |
304 | 216 | } |
305 | | |
306 | 4.01k | Loop *BlockGenerator::getLoopForStmt(const ScopStmt &Stmt) const { |
307 | 4.01k | auto *StmtBB = Stmt.getEntryBlock(); |
308 | 4.01k | return LI.getLoopFor(StmtBB); |
309 | 4.01k | } |
310 | | |
311 | | Value *BlockGenerator::generateArrayLoad(ScopStmt &Stmt, LoadInst *Load, |
312 | | ValueMapT &BBMap, LoopToScevMapT &LTS,
313 | 384 | isl_id_to_ast_expr *NewAccesses) { |
314 | 384 | if (Value *PreloadLoad = GlobalMap.lookup(Load)) |
315 | 30 | return PreloadLoad; |
316 | 354 | |
317 | 354 | Value *NewPointer = |
318 | 354 | generateLocationAccessed(Stmt, Load, BBMap, LTS, NewAccesses); |
319 | 354 | Value *ScalarLoad = Builder.CreateAlignedLoad( |
320 | 354 | NewPointer, Load->getAlignment(), Load->getName() + "_p_scalar_"); |
321 | 354 | |
322 | 354 | if (PollyDebugPrinting) |
323 | 5 | RuntimeDebugBuilder::createCPUPrinter(Builder, "Load from ", NewPointer, |
324 | 5 | ": ", ScalarLoad, "\n"); |
325 | 354 | |
326 | 354 | return ScalarLoad; |
327 | 354 | } |
328 | | |
329 | | void BlockGenerator::generateArrayStore(ScopStmt &Stmt, StoreInst *Store, |
330 | | ValueMapT &BBMap, LoopToScevMapT &LTS,
331 | 411 | isl_id_to_ast_expr *NewAccesses) { |
332 | 411 | MemoryAccess &MA = Stmt.getArrayAccessFor(Store); |
333 | 411 | isl::set AccDom = MA.getAccessRelation().domain(); |
334 | 411 | std::string Subject = MA.getId().get_name(); |
335 | 411 | |
336 | 411 | generateConditionalExecution(Stmt, AccDom, Subject.c_str(), [&, this]() { |
337 | 410 | Value *NewPointer = |
338 | 410 | generateLocationAccessed(Stmt, Store, BBMap, LTS, NewAccesses); |
339 | 410 | Value *ValueOperand = getNewValue(Stmt, Store->getValueOperand(), BBMap, |
340 | 410 | LTS, getLoopForStmt(Stmt)); |
341 | 410 | |
342 | 410 | if (PollyDebugPrinting) |
343 | 1 | RuntimeDebugBuilder::createCPUPrinter(Builder, "Store to ", NewPointer, |
344 | 1 | ": ", ValueOperand, "\n"); |
345 | 410 | |
346 | 410 | Builder.CreateAlignedStore(ValueOperand, NewPointer, Store->getAlignment()); |
347 | 410 | }); |
348 | 411 | } |
349 | | |
350 | 1.43k | bool BlockGenerator::canSyntheziseInStmt(ScopStmt &Stmt, Instruction *Inst) { |
351 | 1.43k | Loop *L = getLoopForStmt(Stmt); |
352 | 1.43k | return (Stmt.isBlockStmt() || !Stmt.getRegion()->contains(L)) &&
353 | 1.43k | canSynthesize(Inst, *Stmt.getParent(), &SE, L);
354 | 1.43k | } |
355 | | |
356 | | void BlockGenerator::copyInstruction(ScopStmt &Stmt, Instruction *Inst, |
357 | | ValueMapT &BBMap, LoopToScevMapT &LTS,
358 | 1.40k | isl_id_to_ast_expr *NewAccesses) { |
359 | 1.40k | // Terminator instructions control the control flow. They are explicitly |
360 | 1.40k | // expressed in the clast and do not need to be copied. |
361 | 1.40k | if (Inst->isTerminator()) |
362 | 68 | return; |
363 | 1.33k | |
364 | 1.33k | // Synthesizable statements will be generated on-demand. |
365 | 1.33k | if (canSyntheziseInStmt(Stmt, Inst)) |
366 | 32 | return; |
367 | 1.30k | |
368 | 1.30k | if (auto *Load = dyn_cast<LoadInst>(Inst)) { |
369 | 384 | Value *NewLoad = generateArrayLoad(Stmt, Load, BBMap, LTS, NewAccesses); |
370 | 384 | // Compute NewLoad before its insertion in BBMap to make the insertion |
371 | 384 | // deterministic. |
372 | 384 | BBMap[Load] = NewLoad; |
373 | 384 | return; |
374 | 384 | } |
375 | 919 | |
376 | 919 | if (auto *Store = dyn_cast<StoreInst>(Inst)) { |
377 | 411 | // Identified as redundant by -polly-simplify. |
378 | 411 | if (!Stmt.getArrayAccessOrNULLFor(Store)) |
379 | 0 | return; |
380 | 411 | |
381 | 411 | generateArrayStore(Stmt, Store, BBMap, LTS, NewAccesses); |
382 | 411 | return; |
383 | 411 | } |
384 | 508 | |
385 | 508 | if (auto *PHI = dyn_cast<PHINode>(Inst)) { |
386 | 45 | copyPHIInstruction(Stmt, PHI, BBMap, LTS); |
387 | 45 | return; |
388 | 45 | } |
389 | 463 | |
390 | 463 | // Skip some special intrinsics for which we do not adjust the semantics to |
391 | 463 | // the new schedule. All others are handled like every other instruction. |
392 | 463 | if (isIgnoredIntrinsic(Inst)) |
393 | 0 | return; |
394 | 463 | |
395 | 463 | copyInstScalar(Stmt, Inst, BBMap, LTS); |
396 | 463 | } |
397 | | |
398 | 473 | void BlockGenerator::removeDeadInstructions(BasicBlock *BB, ValueMapT &BBMap) { |
399 | 473 | auto NewBB = Builder.GetInsertBlock(); |
400 | 4.29k | for (auto I = NewBB->rbegin(); I != NewBB->rend(); I++) {
401 | 3.82k | Instruction *NewInst = &*I; |
402 | 3.82k | |
403 | 3.82k | if (!isInstructionTriviallyDead(NewInst)) |
404 | 3.78k | continue; |
405 | 40 | |
406 | 40 | for (auto Pair : BBMap) |
407 | 201 | if (Pair.second == NewInst) { |
408 | 29 | BBMap.erase(Pair.first); |
409 | 29 | } |
410 | 40 | |
411 | 40 | NewInst->eraseFromParent(); |
412 | 40 | I = NewBB->rbegin(); |
413 | 40 | } |
414 | 473 | } |
415 | | |
416 | | void BlockGenerator::copyStmt(ScopStmt &Stmt, LoopToScevMapT &LTS,
417 | 473 | isl_id_to_ast_expr *NewAccesses) { |
418 | 473 | assert(Stmt.isBlockStmt() && |
419 | 473 | "Only block statements can be copied by the block generator"); |
420 | 473 | |
421 | 473 | ValueMapT BBMap; |
422 | 473 | |
423 | 473 | BasicBlock *BB = Stmt.getBasicBlock(); |
424 | 473 | copyBB(Stmt, BB, BBMap, LTS, NewAccesses); |
425 | 473 | removeDeadInstructions(BB, BBMap); |
426 | 473 | } |
427 | | |
428 | 577 | BasicBlock *BlockGenerator::splitBB(BasicBlock *BB) { |
429 | 577 | BasicBlock *CopyBB = SplitBlock(Builder.GetInsertBlock(), |
430 | 577 | &*Builder.GetInsertPoint(), &DT, &LI); |
431 | 577 | CopyBB->setName("polly.stmt." + BB->getName()); |
432 | 577 | return CopyBB; |
433 | 577 | } |
434 | | |
435 | | BasicBlock *BlockGenerator::copyBB(ScopStmt &Stmt, BasicBlock *BB, |
436 | | ValueMapT &BBMap, LoopToScevMapT &LTS,
437 | 473 | isl_id_to_ast_expr *NewAccesses) { |
438 | 473 | BasicBlock *CopyBB = splitBB(BB); |
439 | 473 | Builder.SetInsertPoint(&CopyBB->front()); |
440 | 473 | generateScalarLoads(Stmt, LTS, BBMap, NewAccesses); |
441 | 473 | generateBeginStmtTrace(Stmt, LTS, BBMap); |
442 | 473 | |
443 | 473 | copyBB(Stmt, BB, CopyBB, BBMap, LTS, NewAccesses); |
444 | 473 | |
445 | 473 | // After a basic block was copied store all scalars that escape this block in |
446 | 473 | // their alloca. |
447 | 473 | generateScalarStores(Stmt, LTS, BBMap, NewAccesses); |
448 | 473 | return CopyBB; |
449 | 473 | } |
450 | | |
451 | | void BlockGenerator::copyBB(ScopStmt &Stmt, BasicBlock *BB, BasicBlock *CopyBB, |
452 | | ValueMapT &BBMap, LoopToScevMapT &LTS,
453 | 577 | isl_id_to_ast_expr *NewAccesses) { |
454 | 577 | EntryBB = &CopyBB->getParent()->getEntryBlock(); |
455 | 577 | |
456 | 577 | // Block statements and the entry blocks of region statement are code |
457 | 577 | // Block statements and the entry blocks of region statements are code
458 | 577 | // generated from instruction lists. This allows us to optimize the
459 | 577 | // instructions that belong to a certain scop statement. As the code
460 | 577 | // structure of region statements might be arbitrarily complex, optimizing the
461 | 577 | if (Stmt.isBlockStmt() || (Stmt.isRegionStmt() && Stmt.getEntryBlock() == BB))
462 | 509 | for (Instruction *Inst : Stmt.getInstructions()) |
463 | 1.15k | copyInstruction(Stmt, Inst, BBMap, LTS, NewAccesses); |
464 | 68 | else |
465 | 68 | for (Instruction &Inst : *BB) |
466 | 172 | copyInstruction(Stmt, &Inst, BBMap, LTS, NewAccesses); |
467 | 577 | } |
468 | | |
469 | 221 | Value *BlockGenerator::getOrCreateAlloca(const MemoryAccess &Access) { |
470 | 221 | assert(!Access.isLatestArrayKind() && "Trying to get alloca for array kind"); |
471 | 221 | |
472 | 221 | return getOrCreateAlloca(Access.getLatestScopArrayInfo()); |
473 | 221 | } |
474 | | |
475 | 303 | Value *BlockGenerator::getOrCreateAlloca(const ScopArrayInfo *Array) { |
476 | 303 | assert(!Array->isArrayKind() && "Trying to get alloca for array kind"); |
477 | 303 | |
478 | 303 | auto &Addr = ScalarMap[Array]; |
479 | 303 | |
480 | 303 | if (Addr) { |
481 | 172 | // Allow allocas to be (temporarily) redirected once by adding a new |
482 | 172 | // old-alloca-addr to new-addr mapping to GlobalMap. This functionality |
483 | 172 | // is used for example by the OpenMP code generation where a first use |
484 | 172 | // of a scalar while still in the host code allocates a normal alloca with |
485 | 172 | // getOrCreateAlloca. When the values of this scalar are accessed during |
486 | 172 | // the generation of the parallel subfunction, these values are copied over |
487 | 172 | // to the parallel subfunction and each request for a scalar alloca slot |
488 | 172 | // must be forwarded to the temporary in-subfunction slot. This mapping is |
489 | 172 | // removed when the subfunction has been generated and again normal host |
490 | 172 | // code is generated. Due to the following reasons it is not possible to |
491 | 172 | // perform the GlobalMap lookup right after creating the alloca below, but |
492 | 172 | // instead we need to check GlobalMap at each call to getOrCreateAlloca: |
493 | 172 | // |
494 | 172 | // 1) GlobalMap may be changed multiple times (for each parallel loop), |
495 | 172 | // 2) The temporary mapping is commonly only known after the initial |
496 | 172 | // alloca has already been generated, and |
497 | 172 | // 3) The original alloca value must be restored after leaving the |
498 | 172 | // sub-function. |
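 | | // Hypothetical timeline: a first call creates %x.s2a for the host code;
 | | // while an OpenMP subfunction is generated, GlobalMap temporarily contains
 | | // %x.s2a -> %x.s2a.subfn, so the lookup below redirects to the
 | | // subfunction's local slot; once the subfunction is finished, the mapping
 | | // is removed and %x.s2a is returned again.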
499 | 172 | if (Value *NewAddr = GlobalMap.lookup(&*Addr)) |
500 | 4 | return NewAddr; |
501 | 168 | return Addr; |
502 | 168 | } |
503 | 131 | |
504 | 131 | Type *Ty = Array->getElementType(); |
505 | 131 | Value *ScalarBase = Array->getBasePtr(); |
506 | 131 | std::string NameExt; |
507 | 131 | if (Array->isPHIKind()) |
508 | 34 | NameExt = ".phiops"; |
509 | 97 | else |
510 | 97 | NameExt = ".s2a"; |
511 | 131 | |
512 | 131 | const DataLayout &DL = Builder.GetInsertBlock()->getModule()->getDataLayout(); |
513 | 131 | |
514 | 131 | Addr = new AllocaInst(Ty, DL.getAllocaAddrSpace(), |
515 | 131 | ScalarBase->getName() + NameExt); |
516 | 131 | EntryBB = &Builder.GetInsertBlock()->getParent()->getEntryBlock(); |
517 | 131 | Addr->insertBefore(&*EntryBB->getFirstInsertionPt()); |
518 | 131 | |
519 | 131 | return Addr; |
520 | 131 | } |
521 | | |
522 | 74 | void BlockGenerator::handleOutsideUsers(const Scop &S, ScopArrayInfo *Array) { |
523 | 74 | Instruction *Inst = cast<Instruction>(Array->getBasePtr()); |
524 | 74 | |
525 | 74 | // If there are escape users we get the alloca for this instruction and put it |
526 | 74 | // in the EscapeMap for later finalization. Lastly, if the instruction was |
527 | 74 | // copied multiple times we already did this and can exit. |
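 | | // Illustrative scenario (hypothetical names): %sum is computed inside the
 | | // scop but also used by a 'ret' after the scop's exit; that use is an
 | | // escape user, so %sum gets an alloca slot here and is merged back via a
 | | // PHI node in createScalarFinalization.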
528 | 74 | if (EscapeMap.count(Inst)) |
529 | 6 | return; |
530 | 68 | |
531 | 68 | EscapeUserVectorTy EscapeUsers; |
532 | 96 | for (User *U : Inst->users()) { |
533 | 96 | |
534 | 96 | // Non-instruction user will never escape. |
535 | 96 | Instruction *UI = dyn_cast<Instruction>(U); |
536 | 96 | if (!UI) |
537 | 0 | continue; |
538 | 96 | |
539 | 96 | if (S.contains(UI)) |
540 | 59 | continue; |
541 | 37 | |
542 | 37 | EscapeUsers.push_back(UI); |
543 | 37 | } |
544 | 68 | |
545 | 68 | // Exit if no escape uses were found. |
546 | 68 | if (EscapeUsers.empty()) |
547 | 35 | return; |
548 | 33 | |
549 | 33 | // Get or create an escape alloca for this instruction. |
550 | 33 | auto *ScalarAddr = getOrCreateAlloca(Array); |
551 | 33 | |
552 | 33 | // Remember that this instruction has escape uses and the escape alloca. |
553 | 33 | EscapeMap[Inst] = std::make_pair(ScalarAddr, std::move(EscapeUsers)); |
554 | 33 | } |
555 | | |
556 | | void BlockGenerator::generateScalarLoads( |
557 | | ScopStmt &Stmt, LoopToScevMapT &LTS, ValueMapT &BBMap,
558 | 509 | __isl_keep isl_id_to_ast_expr *NewAccesses) { |
559 | 992 | for (MemoryAccess *MA : Stmt) { |
560 | 992 | if (MA->isOriginalArrayKind() || MA->isWrite())
561 | 898 | continue; |
562 | 94 | |
563 | | #ifndef NDEBUG |
564 | | auto StmtDom = |
565 | | Stmt.getDomain().intersect_params(Stmt.getParent()->getContext()); |
566 | | auto AccDom = MA->getAccessRelation().domain(); |
567 | | assert(!StmtDom.is_subset(AccDom).is_false() && |
568 | | "Scalar must be loaded in all statement instances"); |
569 | | #endif |
570 | | |
571 | 94 | auto *Address = |
572 | 94 | getImplicitAddress(*MA, getLoopForStmt(Stmt), LTS, BBMap, NewAccesses); |
573 | 94 | assert((!isa<Instruction>(Address) || |
574 | 94 | DT.dominates(cast<Instruction>(Address)->getParent(), |
575 | 94 | Builder.GetInsertBlock())) && |
576 | 94 | "Domination violation"); |
577 | 94 | BBMap[MA->getAccessValue()] = |
578 | 94 | Builder.CreateLoad(Address, Address->getName() + ".reload"); |
579 | 94 | } |
580 | 509 | } |
581 | | |
582 | | Value *BlockGenerator::buildContainsCondition(ScopStmt &Stmt, |
583 | 11 | const isl::set &Subdomain) { |
584 | 11 | isl::ast_build AstBuild = Stmt.getAstBuild(); |
585 | 11 | isl::set Domain = Stmt.getDomain(); |
586 | 11 | |
587 | 11 | isl::union_map USchedule = AstBuild.get_schedule(); |
588 | 11 | USchedule = USchedule.intersect_domain(Domain); |
589 | 11 | |
590 | 11 | assert(!USchedule.is_empty()); |
591 | 11 | isl::map Schedule = isl::map::from_union_map(USchedule); |
592 | 11 | |
593 | 11 | isl::set ScheduledDomain = Schedule.range(); |
594 | 11 | isl::set ScheduledSet = Subdomain.apply(Schedule); |
595 | 11 | |
596 | 11 | isl::ast_build RestrictedBuild = AstBuild.restrict(ScheduledDomain); |
597 | 11 | |
598 | 11 | isl::ast_expr IsInSet = RestrictedBuild.expr_from(ScheduledSet); |
599 | 11 | Value *IsInSetExpr = ExprBuilder->create(IsInSet.copy()); |
600 | 11 | IsInSetExpr = Builder.CreateICmpNE( |
601 | 11 | IsInSetExpr, ConstantInt::get(IsInSetExpr->getType(), 0)); |
602 | 11 | |
603 | 11 | return IsInSetExpr; |
604 | 11 | } |
605 | | |
606 | | void BlockGenerator::generateConditionalExecution( |
607 | | ScopStmt &Stmt, const isl::set &Subdomain, StringRef Subject, |
608 | 564 | const std::function<void()> &GenThenFunc) { |
609 | 564 | isl::set StmtDom = Stmt.getDomain(); |
610 | 564 | |
611 | 564 | // If the condition is a tautology, don't generate a condition around the |
612 | 564 | // code. |
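 | | // E.g. (hypothetical): a store executed only for i > 0 inside a statement
 | | // with domain 0 <= i < n writes on a proper subset of the statement's
 | | // instances, so the generated store must be wrapped in a conditional.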
613 | 564 | bool IsPartialWrite = |
614 | 564 | !StmtDom.intersect_params(Stmt.getParent()->getContext()) |
615 | 564 | .is_subset(Subdomain); |
616 | 564 | if (!IsPartialWrite) { |
617 | 553 | GenThenFunc(); |
618 | 553 | return; |
619 | 553 | } |
620 | 11 | |
621 | 11 | // Generate the condition. |
622 | 11 | Value *Cond = buildContainsCondition(Stmt, Subdomain); |
623 | 11 | |
624 | 11 | // Don't call GenThenFunc if it is never executed. An ast index expression |
625 | 11 | // might not be defined in this case. |
626 | 11 | if (auto *Const = dyn_cast<ConstantInt>(Cond)) |
627 | 2 | if (Const->isZero()) |
628 | 2 | return; |
629 | 9 | |
630 | 9 | BasicBlock *HeadBlock = Builder.GetInsertBlock(); |
631 | 9 | StringRef BlockName = HeadBlock->getName(); |
632 | 9 | |
633 | 9 | // Generate the conditional block. |
634 | 9 | SplitBlockAndInsertIfThen(Cond, &*Builder.GetInsertPoint(), false, nullptr, |
635 | 9 | &DT, &LI); |
636 | 9 | BranchInst *Branch = cast<BranchInst>(HeadBlock->getTerminator()); |
637 | 9 | BasicBlock *ThenBlock = Branch->getSuccessor(0); |
638 | 9 | BasicBlock *TailBlock = Branch->getSuccessor(1); |
639 | 9 | |
640 | 9 | // Assign descriptive names. |
641 | 9 | if (auto *CondInst = dyn_cast<Instruction>(Cond)) |
642 | 9 | CondInst->setName("polly." + Subject + ".cond"); |
643 | 9 | ThenBlock->setName(BlockName + "." + Subject + ".partial"); |
644 | 9 | TailBlock->setName(BlockName + ".cont"); |
645 | 9 | |
646 | 9 | // Put the client code into the conditional block and continue in the merge |
647 | 9 | // block afterwards. |
648 | 9 | Builder.SetInsertPoint(ThenBlock, ThenBlock->getFirstInsertionPt()); |
649 | 9 | GenThenFunc(); |
650 | 9 | Builder.SetInsertPoint(TailBlock, TailBlock->getFirstInsertionPt()); |
651 | 9 | } |
652 | | |
653 | 0 | static std::string getInstName(Value *Val) { |
654 | 0 | std::string Result; |
655 | 0 | raw_string_ostream OS(Result); |
656 | 0 | Val->printAsOperand(OS, false); |
657 | 0 | return OS.str(); |
658 | 0 | } |
659 | | |
660 | | void BlockGenerator::generateBeginStmtTrace(ScopStmt &Stmt, LoopToScevMapT &LTS,
661 | 509 | ValueMapT &BBMap) { |
662 | 509 | if (!TraceStmts) |
663 | 508 | return; |
664 | 1 | |
665 | 1 | Scop *S = Stmt.getParent(); |
666 | 1 | const char *BaseName = Stmt.getBaseName(); |
667 | 1 | |
668 | 1 | isl::ast_build AstBuild = Stmt.getAstBuild(); |
669 | 1 | isl::set Domain = Stmt.getDomain(); |
670 | 1 | |
671 | 1 | isl::union_map USchedule = AstBuild.get_schedule().intersect_domain(Domain); |
672 | 1 | isl::map Schedule = isl::map::from_union_map(USchedule); |
673 | 1 | assert(Schedule.is_empty().is_false() && |
674 | 1 | "The stmt must have a valid instance"); |
675 | 1 | |
676 | 1 | isl::multi_pw_aff ScheduleMultiPwAff = |
677 | 1 | isl::pw_multi_aff::from_map(Schedule.reverse()); |
678 | 1 | isl::ast_build RestrictedBuild = AstBuild.restrict(Schedule.range()); |
679 | 1 | |
680 | 1 | // Sequence of strings to print. |
681 | 1 | SmallVector<llvm::Value *, 8> Values; |
682 | 1 | |
683 | 1 | // Print the name of the statement. |
684 | 1 | // TODO: Indent by the depth of the statement instance in the schedule tree. |
685 | 1 | Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, BaseName)); |
686 | 1 | Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, "(")); |
687 | 1 | |
688 | 1 | // Add the coordinate of the statement instance. |
689 | 1 | int DomDims = ScheduleMultiPwAff.dim(isl::dim::out); |
690 | 2 | for (int i = 0; i < DomDims; i += 1) {
691 | 1 | if (i > 0) |
692 | 0 | Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, ",")); |
693 | 1 | |
694 | 1 | isl::ast_expr IsInSet = |
695 | 1 | RestrictedBuild.expr_from(ScheduleMultiPwAff.get_pw_aff(i)); |
696 | 1 | Values.push_back(ExprBuilder->create(IsInSet.copy())); |
697 | 1 | } |
698 | 1 | |
699 | 1 | if (TraceScalars) { |
700 | 1 | Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, ")")); |
701 | 1 | DenseSet<Instruction *> Encountered; |
702 | 1 | |
703 | 1 | // Add the value of each scalar (and the result of PHIs) used in the |
704 | 1 | // statement. |
705 | 1 | // TODO: Values used in region-statements. |
706 | 1 | for (Instruction *Inst : Stmt.insts()) { |
707 | 1 | if (!RuntimeDebugBuilder::isPrintable(Inst->getType())) |
708 | 1 | continue; |
709 | 0 | |
710 | 0 | if (isa<PHINode>(Inst)) { |
711 | 0 | Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, " ")); |
712 | 0 | Values.push_back(RuntimeDebugBuilder::getPrintableString( |
713 | 0 | Builder, getInstName(Inst))); |
714 | 0 | Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, "=")); |
715 | 0 | Values.push_back(getNewValue(Stmt, Inst, BBMap, LTS, |
716 | 0 | LI.getLoopFor(Inst->getParent()))); |
717 | 0 | } else { |
718 | 0 | for (Value *Op : Inst->operand_values()) { |
719 | 0 | // Do not print values that cannot change during the execution of the |
720 | 0 | // SCoP. |
721 | 0 | auto *OpInst = dyn_cast<Instruction>(Op); |
722 | 0 | if (!OpInst) |
723 | 0 | continue; |
724 | 0 | if (!S->contains(OpInst)) |
725 | 0 | continue; |
726 | 0 | |
727 | 0 | // Print each scalar at most once, and exclude values defined in the |
728 | 0 | // statement itself. |
729 | 0 | if (Encountered.count(OpInst)) |
730 | 0 | continue; |
731 | 0 | |
732 | 0 | Values.push_back( |
733 | 0 | RuntimeDebugBuilder::getPrintableString(Builder, " ")); |
734 | 0 | Values.push_back(RuntimeDebugBuilder::getPrintableString( |
735 | 0 | Builder, getInstName(OpInst))); |
736 | 0 | Values.push_back( |
737 | 0 | RuntimeDebugBuilder::getPrintableString(Builder, "=")); |
738 | 0 | Values.push_back(getNewValue(Stmt, OpInst, BBMap, LTS, |
739 | 0 | LI.getLoopFor(Inst->getParent()))); |
740 | 0 | Encountered.insert(OpInst); |
741 | 0 | } |
742 | 0 | } |
743 | 0 |
744 | 0 | Encountered.insert(Inst); |
745 | 0 | } |
746 | 1 | |
747 | 1 | Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, "\n")); |
748 | 1 | } else { |
749 | 0 | Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, ")\n")); |
750 | 0 | } |
751 | 1 | |
752 | 1 | RuntimeDebugBuilder::createCPUPrinter(Builder, ArrayRef<Value *>(Values)); |
753 | 1 | } |
754 | | |
755 | | void BlockGenerator::generateScalarStores( |
756 | | ScopStmt &Stmt, LoopToScevMapT &LTS, ValueMapT &BBMap,
757 | 473 | __isl_keep isl_id_to_ast_expr *NewAccesses) { |
758 | 473 | Loop *L = LI.getLoopFor(Stmt.getBasicBlock()); |
759 | 473 | |
760 | 473 | assert(Stmt.isBlockStmt() && |
761 | 473 | "Region statements need to use the generateScalarStores() function in " |
762 | 473 | "the RegionGenerator"); |
763 | 473 | |
764 | 894 | for (MemoryAccess *MA : Stmt) { |
765 | 894 | if (MA->isOriginalArrayKind() || MA->isRead())
766 | 761 | continue; |
767 | 133 | |
768 | 133 | isl::set AccDom = MA->getAccessRelation().domain(); |
769 | 133 | std::string Subject = MA->getId().get_name(); |
770 | 133 | |
771 | 133 | generateConditionalExecution( |
772 | 133 | Stmt, AccDom, Subject.c_str(), [&, this, MA]() { |
773 | 132 | Value *Val = MA->getAccessValue(); |
774 | 132 | if (MA->isAnyPHIKind()) { |
775 | 68 | assert(MA->getIncoming().size() >= 1 && |
776 | 68 | "Block statements have exactly one exiting block, or " |
777 | 68 | "multiple but " |
778 | 68 | "with same incoming block and value"); |
779 | 68 | assert(std::all_of(MA->getIncoming().begin(), |
780 | 68 | MA->getIncoming().end(), |
781 | 68 | [&](std::pair<BasicBlock *, Value *> p) -> bool { |
782 | 68 | return p.first == Stmt.getBasicBlock(); |
783 | 68 | }) && |
784 | 68 | "Incoming block must be statement's block"); |
785 | 68 | Val = MA->getIncoming()[0].second; |
786 | 68 | } |
787 | 132 | auto Address = getImplicitAddress(*MA, getLoopForStmt(Stmt), LTS, |
788 | 132 | BBMap, NewAccesses); |
789 | 132 | |
790 | 132 | Val = getNewValue(Stmt, Val, BBMap, LTS, L); |
791 | 132 | assert((!isa<Instruction>(Val) || |
792 | 132 | DT.dominates(cast<Instruction>(Val)->getParent(), |
793 | 132 | Builder.GetInsertBlock())) && |
794 | 132 | "Domination violation"); |
795 | 132 | assert((!isa<Instruction>(Address) || |
796 | 132 | DT.dominates(cast<Instruction>(Address)->getParent(), |
797 | 132 | Builder.GetInsertBlock())) && |
798 | 132 | "Domination violation"); |
799 | 132 | |
800 | 132 | // The new Val might have a different type than the old Val due to |
801 | 132 | // ScalarEvolution looking through bitcasts. |
802 | 132 | if (Val->getType() != Address->getType()->getPointerElementType()) |
803 | 1 | Address = Builder.CreateBitOrPointerCast( |
804 | 1 | Address, Val->getType()->getPointerTo()); |
805 | 132 | |
806 | 132 | Builder.CreateStore(Val, Address); |
807 | 132 | }); |
808 | 133 | } |
809 | 473 | } |
810 | | |
811 | 301 | void BlockGenerator::createScalarInitialization(Scop &S) { |
812 | 301 | BasicBlock *ExitBB = S.getExit(); |
813 | 301 | BasicBlock *PreEntryBB = S.getEnteringBlock(); |
814 | 301 | |
815 | 301 | Builder.SetInsertPoint(&*StartBlock->begin()); |
816 | 301 | |
817 | 640 | for (auto &Array : S.arrays()) { |
818 | 640 | if (Array->getNumberOfDimensions() != 0) |
819 | 490 | continue; |
820 | 150 | if (Array->isPHIKind()) { |
821 | 39 | // For PHI nodes, the only values we need to store are the ones that |
822 | 39 | // reach the PHI node from outside the region. In general there should |
823 | 39 | // only be one such incoming edge and this edge should enter through |
824 | 39 | // 'PreEntryBB'. |
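 | | // Sketch (hypothetical CFG): entering -> entry -> ... -> exit; a PHI in
 | | // 'entry' with an incoming value from 'entering' (the PreEntryBB) has that
 | | // value stored to its .phiops slot here, before the optimized region runs.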
825 | 39 | auto PHI = cast<PHINode>(Array->getBasePtr()); |
826 | 39 | |
827 | 116 | for (auto BI = PHI->block_begin(), BE = PHI->block_end(); BI != BE; BI++)
828 | 77 | if (!S.contains(*BI) && *BI != PreEntryBB)
829 | 77 | llvm_unreachable("Incoming edges from outside the scop should always "
830 | 39 | "come from PreEntryBB"); |
831 | 39 | |
832 | 39 | int Idx = PHI->getBasicBlockIndex(PreEntryBB); |
833 | 39 | if (Idx < 0) |
834 | 26 | continue; |
835 | 13 | |
836 | 13 | Value *ScalarValue = PHI->getIncomingValue(Idx); |
837 | 13 | |
838 | 13 | Builder.CreateStore(ScalarValue, getOrCreateAlloca(Array)); |
839 | 13 | continue; |
840 | 13 | } |
841 | 111 | |
842 | 111 | auto *Inst = dyn_cast<Instruction>(Array->getBasePtr()); |
843 | 111 | |
844 | 111 | if (Inst && S.contains(Inst))
845 | 74 | continue; |
846 | 37 | |
847 | 37 | // PHI nodes that are not marked as such in their SAI object are either exit |
848 | 37 | // PHI nodes we model as common scalars but without initialization, or |
849 | 37 | // incoming phi nodes that need to be initialized. Check if the first is the |
850 | 37 | // case for Inst and do not create and initialize memory if so. |
851 | 37 | if (auto *PHI = dyn_cast_or_null<PHINode>(Inst)) |
852 | 23 | if (!S.hasSingleExitEdge() && PHI->getBasicBlockIndex(ExitBB) >= 0)
853 | 22 | continue; |
854 | 15 | |
855 | 15 | Builder.CreateStore(Array->getBasePtr(), getOrCreateAlloca(Array)); |
856 | 15 | } |
857 | 301 | } |
858 | | |
859 | 301 | void BlockGenerator::createScalarFinalization(Scop &S) { |
860 | 301 | // The exit block of the __unoptimized__ region. |
861 | 301 | BasicBlock *ExitBB = S.getExitingBlock(); |
862 | 301 | // The merge block __just after__ the region and the optimized region. |
863 | 301 | BasicBlock *MergeBB = S.getExit(); |
864 | 301 | |
865 | 301 | // The exit block of the __optimized__ region. |
866 | 301 | BasicBlock *OptExitBB = *(pred_begin(MergeBB)); |
867 | 301 | if (OptExitBB == ExitBB) |
868 | 0 | OptExitBB = *(++pred_begin(MergeBB)); |
869 | 301 | |
870 | 301 | Builder.SetInsertPoint(OptExitBB->getTerminator()); |
871 | 301 | for (const auto &EscapeMapping : EscapeMap) { |
872 | 39 | // Extract the escaping instruction and the escaping users as well as the |
873 | 39 | // alloca the instruction was demoted to. |
874 | 39 | Instruction *EscapeInst = EscapeMapping.first; |
875 | 39 | const auto &EscapeMappingValue = EscapeMapping.second; |
876 | 39 | const EscapeUserVectorTy &EscapeUsers = EscapeMappingValue.second; |
877 | 39 | Value *ScalarAddr = EscapeMappingValue.first; |
878 | 39 | |
879 | 39 | // Reload the demoted instruction in the optimized version of the SCoP. |
880 | 39 | Value *EscapeInstReload = |
881 | 39 | Builder.CreateLoad(ScalarAddr, EscapeInst->getName() + ".final_reload"); |
882 | 39 | EscapeInstReload = |
883 | 39 | Builder.CreateBitOrPointerCast(EscapeInstReload, EscapeInst->getType()); |
884 | 39 | |
885 | 39 | // Create the merge PHI that merges the optimized and unoptimized version. |
886 | 39 | PHINode *MergePHI = PHINode::Create(EscapeInst->getType(), 2, |
887 | 39 | EscapeInst->getName() + ".merge"); |
888 | 39 | MergePHI->insertBefore(&*MergeBB->getFirstInsertionPt()); |
889 | 39 | |
890 | 39 | // Add the respective values to the merge PHI. |
891 | 39 | MergePHI->addIncoming(EscapeInstReload, OptExitBB); |
892 | 39 | MergePHI->addIncoming(EscapeInst, ExitBB); |
893 | 39 | |
894 | 39 | // The information of scalar evolution about the escaping instruction needs |
895 | 39 | // to be revoked so the new merged instruction will be used. |
896 | 39 | if (SE.isSCEVable(EscapeInst->getType())) |
897 | 32 | SE.forgetValue(EscapeInst); |
898 | 39 | |
899 | 39 | // Replace all uses of the demoted instruction with the merge PHI. |
900 | 39 | for (Instruction *EUser : EscapeUsers) |
901 | 44 | EUser->replaceUsesOfWith(EscapeInst, MergePHI); |
902 | 39 | } |
903 | 301 | } |
904 | | |
905 | 301 | void BlockGenerator::findOutsideUsers(Scop &S) { |
906 | 640 | for (auto &Array : S.arrays()) { |
907 | 640 | |
908 | 640 | if (Array->getNumberOfDimensions() != 0) |
909 | 490 | continue; |
910 | 150 | |
911 | 150 | if (Array->isPHIKind()) |
912 | 39 | continue; |
913 | 111 | |
914 | 111 | auto *Inst = dyn_cast<Instruction>(Array->getBasePtr()); |
915 | 111 | |
916 | 111 | if (!Inst) |
917 | 13 | continue; |
918 | 98 | |
919 | 98 | // Scop invariant hoisting moves some of the base pointers out of the scop. |
920 | 98 | // We can ignore these, as the invariant load hoisting already registers the |
921 | 98 | // relevant outside users. |
922 | 98 | if (!S.contains(Inst)) |
923 | 24 | continue; |
924 | 74 | |
925 | 74 | handleOutsideUsers(S, Array); |
926 | 74 | } |
927 | 301 | } |
928 | | |
929 | 301 | void BlockGenerator::createExitPHINodeMerges(Scop &S) { |
930 | 301 | if (S.hasSingleExitEdge()) |
931 | 244 | return; |
932 | 57 | |
933 | 57 | auto *ExitBB = S.getExitingBlock(); |
934 | 57 | auto *MergeBB = S.getExit(); |
935 | 57 | auto *AfterMergeBB = MergeBB->getSingleSuccessor(); |
936 | 57 | BasicBlock *OptExitBB = *(pred_begin(MergeBB)); |
937 | 57 | if (OptExitBB == ExitBB) |
938 | 0 | OptExitBB = *(++pred_begin(MergeBB)); |
939 | 57 | |
940 | 57 | Builder.SetInsertPoint(OptExitBB->getTerminator()); |
941 | 57 | |
942 | 113 | for (auto &SAI : S.arrays()) { |
943 | 113 | auto *Val = SAI->getBasePtr(); |
944 | 113 | |
945 | 113 | // Only Value-like scalars need a merge PHI. Exit block PHIs receive either |
946 | 113 | // the original PHI's value or the reloaded incoming values from the |
947 | 113 | // generated code. An llvm::Value is merged between the original code's |
948 | 113 | // value or the generated one. |
949 | 113 | if (!SAI->isExitPHIKind()) |
950 | 92 | continue; |
951 | 21 | |
952 | 21 | PHINode *PHI = dyn_cast<PHINode>(Val); |
953 | 21 | if (!PHI) |
954 | 0 | continue; |
955 | 21 | |
956 | 21 | if (PHI->getParent() != AfterMergeBB) |
957 | 0 | continue; |
958 | 21 | |
959 | 21 | std::string Name = PHI->getName(); |
960 | 21 | Value *ScalarAddr = getOrCreateAlloca(SAI); |
961 | 21 | Value *Reload = Builder.CreateLoad(ScalarAddr, Name + ".ph.final_reload"); |
962 | 21 | Reload = Builder.CreateBitOrPointerCast(Reload, PHI->getType()); |
963 | 21 | Value *OriginalValue = PHI->getIncomingValueForBlock(MergeBB); |
964 | 21 | assert((!isa<Instruction>(OriginalValue) || |
965 | 21 | cast<Instruction>(OriginalValue)->getParent() != MergeBB) && |
966 | 21 | "Original value must not be one we just generated.");
967 | 21 | auto *MergePHI = PHINode::Create(PHI->getType(), 2, Name + ".ph.merge"); |
968 | 21 | MergePHI->insertBefore(&*MergeBB->getFirstInsertionPt()); |
969 | 21 | MergePHI->addIncoming(Reload, OptExitBB); |
970 | 21 | MergePHI->addIncoming(OriginalValue, ExitBB); |
971 | 21 | int Idx = PHI->getBasicBlockIndex(MergeBB); |
972 | 21 | PHI->setIncomingValue(Idx, MergePHI); |
973 | 21 | } |
974 | 57 | } |
975 | | |
976 | 301 | void BlockGenerator::invalidateScalarEvolution(Scop &S) { |
977 | 301 | for (auto &Stmt : S) |
978 | 458 | if (Stmt.isCopyStmt()) |
979 | 4 | continue; |
980 | 454 | else if (Stmt.isBlockStmt()) |
981 | 419 | for (auto &Inst : *Stmt.getBasicBlock()) |
982 | 3.28k | SE.forgetValue(&Inst); |
983 | 35 | else if (Stmt.isRegionStmt()) |
984 | 35 | for (auto *BB : Stmt.getRegion()->blocks()) |
985 | 101 | for (auto &Inst : *BB) |
986 | 303 | SE.forgetValue(&Inst); |
987 | 35 | else |
988 | 35 | llvm_unreachable("Unexpected statement type found");
989 | 301 | |
990 | 301 | // Invalidate SCEV of loops surrounding the EscapeUsers. |
991 | 301 | for (const auto &EscapeMapping : EscapeMap) { |
992 | 39 | const EscapeUserVectorTy &EscapeUsers = EscapeMapping.second.second; |
993 | 44 | for (Instruction *EUser : EscapeUsers) { |
994 | 44 | if (Loop *L = LI.getLoopFor(EUser->getParent())) |
995 | 34 | while (L) {
996 | 17 | SE.forgetLoop(L); |
997 | 17 | L = L->getParentLoop(); |
998 | 17 | } |
999 | 44 | } |
1000 | 39 | } |
1001 | 301 | } |
1002 | | |
1003 | 301 | void BlockGenerator::finalizeSCoP(Scop &S) { |
1004 | 301 | findOutsideUsers(S); |
1005 | 301 | createScalarInitialization(S); |
1006 | 301 | createExitPHINodeMerges(S); |
1007 | 301 | createScalarFinalization(S); |
1008 | 301 | invalidateScalarEvolution(S); |
1009 | 301 | } |
1010 | | |
1011 | | VectorBlockGenerator::VectorBlockGenerator(BlockGenerator &BlockGen, |
1012 | | std::vector<LoopToScevMapT> &VLTS, |
1013 | | isl_map *Schedule) |
1014 | 20 | : BlockGenerator(BlockGen), VLTS(VLTS), Schedule(Schedule) { |
1015 | 20 | assert(Schedule && "No statement domain provided"); |
1016 | 20 | } |
1017 | | |
1018 | | Value *VectorBlockGenerator::getVectorValue(ScopStmt &Stmt, Value *Old, |
1019 | | ValueMapT &VectorMap, |
1020 | | VectorValueMapT &ScalarMaps, |
1021 | 32 | Loop *L) { |
1022 | 32 | if (Value *NewValue = VectorMap.lookup(Old)) |
1023 | 29 | return NewValue; |
1024 | 3 | |
1025 | 3 | int Width = getVectorWidth(); |
1026 | 3 | |
1027 | 3 | Value *Vector = UndefValue::get(VectorType::get(Old->getType(), Width)); |
1028 | 3 | |
1029 | 15 | for (int Lane = 0; Lane < Width; Lane++)
1030 | 12 | Vector = Builder.CreateInsertElement( |
1031 | 12 | Vector, getNewValue(Stmt, Old, ScalarMaps[Lane], VLTS[Lane], L), |
1032 | 12 | Builder.getInt32(Lane)); |
1033 | 3 | |
1034 | 3 | VectorMap[Old] = Vector; |
1035 | 3 | |
1036 | 3 | return Vector; |
1037 | 3 | } |
1038 | | |
1039 | 27 | Type *VectorBlockGenerator::getVectorPtrTy(const Value *Val, int Width) { |
1040 | 27 | PointerType *PointerTy = dyn_cast<PointerType>(Val->getType()); |
1041 | 27 | assert(PointerTy && "PointerType expected"); |
1042 | 27 | |
1043 | 27 | Type *ScalarType = PointerTy->getElementType(); |
1044 | 27 | VectorType *VectorType = VectorType::get(ScalarType, Width); |
1045 | 27 | |
1046 | 27 | return PointerType::getUnqual(VectorType); |
1047 | 27 | } |
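 | | // Example (illustrative): for Val of type double* and Width == 4, the
 | | // returned type is <4 x double>* in the default address space.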
1048 | | |
1049 | | Value *VectorBlockGenerator::generateStrideOneLoad( |
1050 | | ScopStmt &Stmt, LoadInst *Load, VectorValueMapT &ScalarMaps, |
1051 | 8 | __isl_keep isl_id_to_ast_expr *NewAccesses, bool NegativeStride = false) { |
1052 | 8 | unsigned VectorWidth = getVectorWidth(); |
1053 | 8 | auto *Pointer = Load->getPointerOperand(); |
1054 | 8 | Type *VectorPtrType = getVectorPtrTy(Pointer, VectorWidth); |
1055 | 8 | unsigned Offset = NegativeStride ? VectorWidth - 1 : 0;
1056 | 8 | |
1057 | 8 | Value *NewPointer = generateLocationAccessed(Stmt, Load, ScalarMaps[Offset], |
1058 | 8 | VLTS[Offset], NewAccesses); |
1059 | 8 | Value *VectorPtr = |
1060 | 8 | Builder.CreateBitCast(NewPointer, VectorPtrType, "vector_ptr"); |
1061 | 8 | LoadInst *VecLoad = |
1062 | 8 | Builder.CreateLoad(VectorPtr, Load->getName() + "_p_vec_full"); |
1063 | 8 | if (!Aligned) |
1064 | 8 | VecLoad->setAlignment(8); |
1065 | 8 | |
1066 | 8 | if (NegativeStride) { |
1067 | 1 | SmallVector<Constant *, 16> Indices; |
1068 | 5 | for (int i = VectorWidth - 1; i >= 0; i--)
1069 | 4 | Indices.push_back(ConstantInt::get(Builder.getInt32Ty(), i)); |
1070 | 1 | Constant *SV = llvm::ConstantVector::get(Indices); |
1071 | 1 | Value *RevVecLoad = Builder.CreateShuffleVector( |
1072 | 1 | VecLoad, VecLoad, SV, Load->getName() + "_reverse"); |
1073 | 1 | return RevVecLoad; |
1074 | 1 | } |
1075 | 7 | |
1076 | 7 | return VecLoad; |
1077 | 7 | } |
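 | | // Illustrative result for VectorWidth == 4: a single <4 x double> load
 | | // replaces four consecutive scalar loads; with NegativeStride, the load is
 | | // anchored at the last lane's address and a shufflevector with mask
 | | // <3,2,1,0> restores the original element order.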
1078 | | |
1079 | | Value *VectorBlockGenerator::generateStrideZeroLoad( |
1080 | | ScopStmt &Stmt, LoadInst *Load, ValueMapT &BBMap, |
1081 | 3 | __isl_keep isl_id_to_ast_expr *NewAccesses) { |
1082 | 3 | auto *Pointer = Load->getPointerOperand(); |
1083 | 3 | Type *VectorPtrType = getVectorPtrTy(Pointer, 1); |
1084 | 3 | Value *NewPointer = |
1085 | 3 | generateLocationAccessed(Stmt, Load, BBMap, VLTS[0], NewAccesses); |
1086 | 3 | Value *VectorPtr = Builder.CreateBitCast(NewPointer, VectorPtrType, |
1087 | 3 | Load->getName() + "_p_vec_p"); |
1088 | 3 | LoadInst *ScalarLoad = |
1089 | 3 | Builder.CreateLoad(VectorPtr, Load->getName() + "_p_splat_one"); |
1090 | 3 | |
1091 | 3 | if (!Aligned) |
1092 | 3 | ScalarLoad->setAlignment(8); |
1093 | 3 | |
1094 | 3 | Constant *SplatVector = Constant::getNullValue( |
1095 | 3 | VectorType::get(Builder.getInt32Ty(), getVectorWidth())); |
1096 | 3 | |
1097 | 3 | Value *VectorLoad = Builder.CreateShuffleVector( |
1098 | 3 | ScalarLoad, ScalarLoad, SplatVector, Load->getName() + "_p_splat"); |
1099 | 3 | return VectorLoad; |
1100 | 3 | } |
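 | | // Illustrative result for VectorWidth == 4: one element is loaded and then
 | | // broadcast by a shufflevector with the all-zero mask <0,0,0,0>, producing
 | | // a splat of the single loaded value.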
1101 | | |
1102 | | Value *VectorBlockGenerator::generateUnknownStrideLoad( |
1103 | | ScopStmt &Stmt, LoadInst *Load, VectorValueMapT &ScalarMaps, |
1104 | 3 | __isl_keep isl_id_to_ast_expr *NewAccesses) { |
1105 | 3 | int VectorWidth = getVectorWidth(); |
1106 | 3 | auto *Pointer = Load->getPointerOperand(); |
1107 | 3 | VectorType *VectorType = VectorType::get( |
1108 | 3 | dyn_cast<PointerType>(Pointer->getType())->getElementType(), VectorWidth); |
1109 | 3 | |
1110 | 3 | Value *Vector = UndefValue::get(VectorType); |
1111 | 3 | |
1112 | 23 | for (int i = 0; i < VectorWidth; i++) {
1113 | 20 | Value *NewPointer = generateLocationAccessed(Stmt, Load, ScalarMaps[i], |
1114 | 20 | VLTS[i], NewAccesses); |
1115 | 20 | Value *ScalarLoad = |
1116 | 20 | Builder.CreateLoad(NewPointer, Load->getName() + "_p_scalar_"); |
1117 | 20 | Vector = Builder.CreateInsertElement( |
1118 | 20 | Vector, ScalarLoad, Builder.getInt32(i), Load->getName() + "_p_vec_"); |
1119 | 20 | } |
1120 | 3 | |
1121 | 3 | return Vector; |
1122 | 3 | } |
1123 | | |
1124 | | void VectorBlockGenerator::generateLoad( |
1125 | | ScopStmt &Stmt, LoadInst *Load, ValueMapT &VectorMap, |
1126 | 20 | VectorValueMapT &ScalarMaps, __isl_keep isl_id_to_ast_expr *NewAccesses) { |
1127 | 20 | if (Value *PreloadLoad = GlobalMap.lookup(Load)) { |
1128 | 6 | VectorMap[Load] = Builder.CreateVectorSplat(getVectorWidth(), PreloadLoad, |
1129 | 6 | Load->getName() + "_p"); |
1130 | 6 | return; |
1131 | 6 | } |
1132 | 14 | |
1133 | 14 | if (!VectorType::isValidElementType(Load->getType())) { |
1134 | 0 | for (int i = 0; i < getVectorWidth(); i++) |
1135 | 0 | ScalarMaps[i][Load] = |
1136 | 0 | generateArrayLoad(Stmt, Load, ScalarMaps[i], VLTS[i], NewAccesses); |
1137 | 0 | return; |
1138 | 0 | } |
1139 | 14 | |
1140 | 14 | const MemoryAccess &Access = Stmt.getArrayAccessFor(Load); |
1141 | 14 | |
1142 | 14 | // Make sure we have scalar values available to access the pointer to |
1143 | 14 | // the data location. |
1144 | 14 | extractScalarValues(Load, VectorMap, ScalarMaps); |
1145 | 14 | |
1146 | 14 | Value *NewLoad; |
1147 | 14 | if (Access.isStrideZero(isl::manage_copy(Schedule))) |
1148 | 3 | NewLoad = generateStrideZeroLoad(Stmt, Load, ScalarMaps[0], NewAccesses); |
1149 | 11 | else if (Access.isStrideOne(isl::manage_copy(Schedule))) |
1150 | 7 | NewLoad = generateStrideOneLoad(Stmt, Load, ScalarMaps, NewAccesses); |
1151 | 4 | else if (Access.isStrideX(isl::manage_copy(Schedule), -1)) |
1152 | 1 | NewLoad = generateStrideOneLoad(Stmt, Load, ScalarMaps, NewAccesses, true); |
1153 | 3 | else |
1154 | 3 | NewLoad = generateUnknownStrideLoad(Stmt, Load, ScalarMaps, NewAccesses); |
1155 | 14 | |
1156 | 14 | VectorMap[Load] = NewLoad; |
1157 | 14 | } |
1158 | | |
1159 | | void VectorBlockGenerator::copyUnaryInst(ScopStmt &Stmt, UnaryInstruction *Inst, |
1160 | | ValueMapT &VectorMap, |
1161 | 1 | VectorValueMapT &ScalarMaps) { |
1162 | 1 | int VectorWidth = getVectorWidth(); |
1163 | 1 | Value *NewOperand = getVectorValue(Stmt, Inst->getOperand(0), VectorMap, |
1164 | 1 | ScalarMaps, getLoopForStmt(Stmt)); |
1165 | 1 | |
1166 | 1 | assert(isa<CastInst>(Inst) && "Can not generate vector code for instruction"); |
1167 | 1 | |
1168 | 1 | const CastInst *Cast = dyn_cast<CastInst>(Inst); |
1169 | 1 | VectorType *DestType = VectorType::get(Inst->getType(), VectorWidth); |
1170 | 1 | VectorMap[Inst] = Builder.CreateCast(Cast->getOpcode(), NewOperand, DestType); |
1171 | 1 | } |
1172 | | |
1173 | | void VectorBlockGenerator::copyBinaryInst(ScopStmt &Stmt, BinaryOperator *Inst, |
1174 | | ValueMapT &VectorMap, |
1175 | 7 | VectorValueMapT &ScalarMaps) { |
1176 | 7 | Loop *L = getLoopForStmt(Stmt); |
1177 | 7 | Value *OpZero = Inst->getOperand(0); |
1178 | 7 | Value *OpOne = Inst->getOperand(1); |
1179 | 7 | |
1180 | 7 | Value *NewOpZero, *NewOpOne; |
1181 | 7 | NewOpZero = getVectorValue(Stmt, OpZero, VectorMap, ScalarMaps, L); |
1182 | 7 | NewOpOne = getVectorValue(Stmt, OpOne, VectorMap, ScalarMaps, L); |
1183 | 7 | |
1184 | 7 | Value *NewInst = Builder.CreateBinOp(Inst->getOpcode(), NewOpZero, NewOpOne, |
1185 | 7 | Inst->getName() + "p_vec"); |
1186 | 7 | VectorMap[Inst] = NewInst; |
1187 | 7 | } |
1188 | | |
1189 | | void VectorBlockGenerator::copyStore( |
1190 | | ScopStmt &Stmt, StoreInst *Store, ValueMapT &VectorMap, |
1191 | 17 | VectorValueMapT &ScalarMaps, __isl_keep isl_id_to_ast_expr *NewAccesses) { |
1192 | 17 | const MemoryAccess &Access = Stmt.getArrayAccessFor(Store); |
1193 | 17 | |
1194 | 17 | auto *Pointer = Store->getPointerOperand(); |
1195 | 17 | Value *Vector = getVectorValue(Stmt, Store->getValueOperand(), VectorMap, |
1196 | 17 | ScalarMaps, getLoopForStmt(Stmt)); |
1197 | 17 | |
1198 | 17 | // Make sure we have scalar values available to access the pointer to |
1199 | 17 | // the data location. |
1200 | 17 | extractScalarValues(Store, VectorMap, ScalarMaps); |
1201 | 17 | |
1202 | 17 | if (Access.isStrideOne(isl::manage_copy(Schedule))) { |
1203 | 15 | Type *VectorPtrType = getVectorPtrTy(Pointer, getVectorWidth()); |
1204 | 15 | Value *NewPointer = generateLocationAccessed(Stmt, Store, ScalarMaps[0], |
1205 | 15 | VLTS[0], NewAccesses); |
1206 | 15 | |
1207 | 15 | Value *VectorPtr = |
1208 | 15 | Builder.CreateBitCast(NewPointer, VectorPtrType, "vector_ptr"); |
1209 | 15 | StoreInst *Store = Builder.CreateStore(Vector, VectorPtr); |
1210 | 15 | |
1211 | 15 | if (!Aligned) |
1212 | 15 | Store->setAlignment(8); |
1213 | 15 | } else { |
1214 | 10 | for (unsigned i = 0; i < ScalarMaps.size(); i++) {
1215 | 8 | Value *Scalar = Builder.CreateExtractElement(Vector, Builder.getInt32(i)); |
1216 | 8 | Value *NewPointer = generateLocationAccessed(Stmt, Store, ScalarMaps[i], |
1217 | 8 | VLTS[i], NewAccesses); |
1218 | 8 | Builder.CreateStore(Scalar, NewPointer); |
1219 | 8 | } |
1220 | 2 | } |
1221 | 17 | } |
1222 | | |
1223 | | bool VectorBlockGenerator::hasVectorOperands(const Instruction *Inst, |
1224 | 40 | ValueMapT &VectorMap) { |
1225 | 40 | for (Value *Operand : Inst->operands()) |
1226 | 51 | if (VectorMap.count(Operand)) |
1227 | 28 | return true; |
1228 | 40 | return false;
1229 | 40 | } |
1230 | | |
1231 | | bool VectorBlockGenerator::extractScalarValues(const Instruction *Inst, |
1232 | | ValueMapT &VectorMap, |
1233 | 46 | VectorValueMapT &ScalarMaps) { |
1234 | 46 | bool HasVectorOperand = false; |
1235 | 46 | int VectorWidth = getVectorWidth(); |
1236 | 46 | |
1237 | 77 | for (Value *Operand : Inst->operands()) { |
1238 | 77 | ValueMapT::iterator VecOp = VectorMap.find(Operand); |
1239 | 77 | |
1240 | 77 | if (VecOp == VectorMap.end()) |
1241 | 57 | continue; |
1242 | 20 | |
1243 | 20 | HasVectorOperand = true; |
1244 | 20 | Value *NewVector = VecOp->second; |
1245 | 20 | |
1246 | 115 | for (int i = 0; i < VectorWidth; ++i) {
1247 | 98 | ValueMapT &SM = ScalarMaps[i]; |
1248 | 98 | |
1249 | 98 | // If one scalar element has already been extracted, all of them have been
1250 | 98 | // extracted by this code, so there is no need to check for the existence
1251 | 98 | // of each of them.
1252 | 98 | if (SM.count(Operand)) |
1253 | 3 | break; |
1254 | 95 | |
1255 | 95 | SM[Operand] = |
1256 | 95 | Builder.CreateExtractElement(NewVector, Builder.getInt32(i)); |
1257 | 95 | } |
1258 | 20 | } |
1259 | 46 | |
1260 | 46 | return HasVectorOperand; |
1261 | 46 | } |
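
The extraction idiom used here, in isolation: a sketch against the llvm::IRBuilder API of the LLVM version this file belongs to; the helper name is invented.

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/IRBuilder.h"

// Split a vector value into its scalar lanes with one extractelement per
// lane, as extractScalarValues does when filling the per-lane maps.
llvm::SmallVector<llvm::Value *, 4>
splitIntoLanes(llvm::IRBuilder<> &Builder, llvm::Value *Vec, int Width) {
  llvm::SmallVector<llvm::Value *, 4> Lanes;
  for (int i = 0; i < Width; ++i)
    Lanes.push_back(Builder.CreateExtractElement(Vec, Builder.getInt32(i)));
  return Lanes;
}
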
1262 | | |
1263 | | void VectorBlockGenerator::copyInstScalarized( |
1264 | | ScopStmt &Stmt, Instruction *Inst, ValueMapT &VectorMap, |
1265 | 15 | VectorValueMapT &ScalarMaps, __isl_keep isl_id_to_ast_expr *NewAccesses) { |
1266 | 15 | bool HasVectorOperand; |
1267 | 15 | int VectorWidth = getVectorWidth(); |
1268 | 15 | |
1269 | 15 | HasVectorOperand = extractScalarValues(Inst, VectorMap, ScalarMaps); |
1270 | 15 | |
1271 | 91 | for (int VectorLane = 0; VectorLane < getVectorWidth(); VectorLane++)
1272 | 76 | BlockGenerator::copyInstruction(Stmt, Inst, ScalarMaps[VectorLane], |
1273 | 76 | VLTS[VectorLane], NewAccesses); |
1274 | 15 | |
1275 | 15 | if (!VectorType::isValidElementType(Inst->getType()) || !HasVectorOperand)
1276 | 12 | return; |
1277 | 3 | |
1278 | 3 | // Make the result available as vector value. |
1279 | 3 | VectorType *VectorType = VectorType::get(Inst->getType(), VectorWidth); |
1280 | 3 | Value *Vector = UndefValue::get(VectorType); |
1281 | 3 | |
1282 | 15 | for (int i = 0; i < VectorWidth; i++)
1283 | 12 | Vector = Builder.CreateInsertElement(Vector, ScalarMaps[i][Inst], |
1284 | 12 | Builder.getInt32(i)); |
1285 | 3 | |
1286 | 3 | VectorMap[Inst] = Vector; |
1287 | 3 | } |
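
The reverse direction, turning per-lane results back into a vector, is the insertelement chain at the end of copyInstScalarized. A hedged sketch, assuming the era-appropriate VectorType::get(ElementType, NumElements) signature; the helper name is invented.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"

// Combine scalar lane values into one vector value, starting from undef.
llvm::Value *buildVectorFromLanes(llvm::IRBuilder<> &Builder,
                                  llvm::ArrayRef<llvm::Value *> Lanes) {
  llvm::Type *ElemTy = Lanes[0]->getType();
  llvm::Value *Vec =
      llvm::UndefValue::get(llvm::VectorType::get(ElemTy, Lanes.size()));
  for (unsigned i = 0; i < Lanes.size(); ++i)
    Vec = Builder.CreateInsertElement(Vec, Lanes[i], Builder.getInt32(i));
  return Vec;
}
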
1288 | | |
1289 | 212 | int VectorBlockGenerator::getVectorWidth() { return VLTS.size(); } |
1290 | | |
1291 | | void VectorBlockGenerator::copyInstruction( |
1292 | | ScopStmt &Stmt, Instruction *Inst, ValueMapT &VectorMap, |
1293 | 122 | VectorValueMapT &ScalarMaps, __isl_keep isl_id_to_ast_expr *NewAccesses) { |
1294 | 122 | // Terminator instructions define the control flow. They are expressed
1295 | 122 | // explicitly in the clast and do not need to be copied.
1296 | 122 | if (Inst->isTerminator()) |
1297 | 20 | return; |
1298 | 102 | |
1299 | 102 | if (canSyntheziseInStmt(Stmt, Inst)) |
1300 | 42 | return; |
1301 | 60 | |
1302 | 60 | if (auto *Load = dyn_cast<LoadInst>(Inst)) { |
1303 | 20 | generateLoad(Stmt, Load, VectorMap, ScalarMaps, NewAccesses); |
1304 | 20 | return; |
1305 | 20 | } |
1306 | 40 | |
1307 | 40 | if (hasVectorOperands(Inst, VectorMap)) { |
1308 | 28 | if (auto *Store = dyn_cast<StoreInst>(Inst)) { |
1309 | 17 | // Identified as redundant by -polly-simplify. |
1310 | 17 | if (!Stmt.getArrayAccessOrNULLFor(Store)) |
1311 | 0 | return; |
1312 | 17 | |
1313 | 17 | copyStore(Stmt, Store, VectorMap, ScalarMaps, NewAccesses); |
1314 | 17 | return; |
1315 | 17 | } |
1316 | 11 | |
1317 | 11 | if (auto *Unary = dyn_cast<UnaryInstruction>(Inst)) { |
1318 | 1 | copyUnaryInst(Stmt, Unary, VectorMap, ScalarMaps); |
1319 | 1 | return; |
1320 | 1 | } |
1321 | 10 | |
1322 | 10 | if (auto *Binary = dyn_cast<BinaryOperator>(Inst)) { |
1323 | 7 | copyBinaryInst(Stmt, Binary, VectorMap, ScalarMaps); |
1324 | 7 | return; |
1325 | 7 | } |
1326 | 15 | |
1327 | 15 | // Fallthrough: we generate scalar instructions if we do not know how to
1328 | 15 | // generate vector code.
1329 | 15 | } |
1330 | 15 | |
1331 | 15 | copyInstScalarized(Stmt, Inst, VectorMap, ScalarMaps, NewAccesses); |
1332 | 15 | } |
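
The dispatch above condenses to a small decision function. The following is only a model of the control flow, with an invented enum; it is not part of the generator's interface.

enum class VectorCopyKind { Skip, Load, Store, Unary, Binary, Scalarized };

// Mirrors copyInstruction: terminators and synthesizable values need no
// copy; loads always get dedicated handling; stores, casts, and binary
// operators are widened only when an operand already has a vector value;
// everything else is replayed once per lane.
VectorCopyKind classifyForVectorCopy(bool IsTerminator, bool Synthesizable,
                                     bool IsLoad, bool HasVectorOperand,
                                     bool IsStore, bool IsUnary,
                                     bool IsBinary) {
  if (IsTerminator || Synthesizable)
    return VectorCopyKind::Skip;
  if (IsLoad)
    return VectorCopyKind::Load;
  if (HasVectorOperand) {
    if (IsStore)
      return VectorCopyKind::Store;
    if (IsUnary)
      return VectorCopyKind::Unary;
    if (IsBinary)
      return VectorCopyKind::Binary;
  }
  return VectorCopyKind::Scalarized;
}
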
1333 | | |
1334 | | void VectorBlockGenerator::generateScalarVectorLoads( |
1335 | 20 | ScopStmt &Stmt, ValueMapT &VectorBlockMap) { |
1336 | 35 | for (MemoryAccess *MA : Stmt) { |
1337 | 35 | if (MA->isArrayKind() || MA->isWrite())
1338 | 34 | continue; |
1339 | 1 | |
1340 | 1 | auto *Address = getOrCreateAlloca(*MA); |
1341 | 1 | Type *VectorPtrType = getVectorPtrTy(Address, 1); |
1342 | 1 | Value *VectorPtr = Builder.CreateBitCast(Address, VectorPtrType, |
1343 | 1 | Address->getName() + "_p_vec_p"); |
1344 | 1 | auto *Val = Builder.CreateLoad(VectorPtr, Address->getName() + ".reload"); |
1345 | 1 | Constant *SplatVector = Constant::getNullValue( |
1346 | 1 | VectorType::get(Builder.getInt32Ty(), getVectorWidth())); |
1347 | 1 | |
1348 | 1 | Value *VectorVal = Builder.CreateShuffleVector( |
1349 | 1 | Val, Val, SplatVector, Address->getName() + "_p_splat"); |
1350 | 1 | VectorBlockMap[MA->getAccessValue()] = VectorVal; |
1351 | 1 | } |
1352 | 20 | } |
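
The splat idiom above (load a <1 x Ty> value, then broadcast it with an all-zero shuffle mask) in isolation; a sketch against the same-era IRBuilder API with an invented helper name.

#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"

// Broadcast a single-element vector to Width lanes: shuffling a value with
// an all-zero mask replicates lane 0 into every result lane.
llvm::Value *splatSingleElementVector(llvm::IRBuilder<> &Builder,
                                      llvm::Value *OneElementVec,
                                      unsigned Width) {
  llvm::Constant *ZeroMask = llvm::Constant::getNullValue(
      llvm::VectorType::get(Builder.getInt32Ty(), Width));
  return Builder.CreateShuffleVector(OneElementVec, OneElementVec, ZeroMask,
                                     "splat");
}
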
1353 | | |
1354 | 20 | void VectorBlockGenerator::verifyNoScalarStores(ScopStmt &Stmt) { |
1355 | 35 | for (MemoryAccess *MA : Stmt) { |
1356 | 35 | if (MA->isArrayKind() || MA->isRead())
1357 | 35 | continue; |
1358 | 0 | |
1359 | 0 | llvm_unreachable("Scalar stores not expected in vector loop"); |
1360 | 0 | } |
1361 | 20 | } |
1362 | | |
1363 | | void VectorBlockGenerator::copyStmt( |
1364 | 20 | ScopStmt &Stmt, __isl_keep isl_id_to_ast_expr *NewAccesses) { |
1365 | 20 | assert(Stmt.isBlockStmt() && |
1366 | 20 | "TODO: Only block statements can be copied by the vector block " |
1367 | 20 | "generator"); |
1368 | 20 | |
1369 | 20 | BasicBlock *BB = Stmt.getBasicBlock(); |
1370 | 20 | BasicBlock *CopyBB = SplitBlock(Builder.GetInsertBlock(), |
1371 | 20 | &*Builder.GetInsertPoint(), &DT, &LI); |
1372 | 20 | CopyBB->setName("polly.stmt." + BB->getName()); |
1373 | 20 | Builder.SetInsertPoint(&CopyBB->front()); |
1374 | 20 | |
1375 | 20 | // Create two maps that store the mapping from the original instructions of |
1376 | 20 | // the old basic block to their copies in the new basic block. Those maps |
1377 | 20 | // are basic block local. |
1378 | 20 | // |
1379 | 20 | // Because vector code generation is supported, there is one map for scalar
1380 | 20 | // values and one for vector values.
1381 | 20 | //
1382 | 20 | // In case we just do scalar code generation, the vectorMap is not used and
1383 | 20 | // the scalarMap has just one dimension, which contains the mapping.
1384 | 20 | //
1385 | 20 | // In case vector code generation is done, an instruction may either appear
1386 | 20 | // once in the vector map (as it computes <vectorwidth> values at a time),
1387 | 20 | // or, if its values are computed using scalar operations, once in every
1388 | 20 | // dimension of the scalarMap.
1389 | 20 | VectorValueMapT ScalarBlockMap(getVectorWidth()); |
1390 | 20 | ValueMapT VectorBlockMap; |
1391 | 20 | |
1392 | 20 | generateScalarVectorLoads(Stmt, VectorBlockMap); |
1393 | 20 | |
1394 | 20 | for (Instruction &Inst : *BB) |
1395 | 122 | copyInstruction(Stmt, &Inst, VectorBlockMap, ScalarBlockMap, NewAccesses); |
1396 | 20 | |
1397 | 20 | verifyNoScalarStores(Stmt); |
1398 | 20 | } |
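
The comment above describes where an instruction's copies live. As a toy model with stand-in types (none of these are Polly's): a vectorized instruction has one entry in the vector map, while a scalarized one has an entry in each of the Width lane maps (and possibly a re-assembled vector entry as well, see copyInstScalarized).

#include <map>
#include <vector>

struct Instr {}; // stand-in for llvm::Instruction
struct Val {};   // stand-in for llvm::Value
using LaneMap = std::map<const Instr *, Val *>;

enum class Where { Vector, PerLane, Nowhere };

// Report where a copied instruction's result lives; a vector entry is
// preferred, matching how vector-value lookups proceed.
Where locate(const Instr *I, const LaneMap &VectorMap,
             const std::vector<LaneMap> &ScalarMaps) {
  if (VectorMap.count(I))
    return Where::Vector;
  for (const LaneMap &SM : ScalarMaps)
    if (!SM.count(I))
      return Where::Nowhere; // per-lane copies exist for all lanes or none
  return Where::PerLane;
}
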
1399 | | |
1400 | | BasicBlock *RegionGenerator::repairDominance(BasicBlock *BB, |
1401 | 104 | BasicBlock *BBCopy) { |
1402 | 104 | |
1403 | 104 | BasicBlock *BBIDom = DT.getNode(BB)->getIDom()->getBlock(); |
1404 | 104 | BasicBlock *BBCopyIDom = EndBlockMap.lookup(BBIDom); |
1405 | 104 | |
1406 | 104 | if (BBCopyIDom) |
1407 | 103 | DT.changeImmediateDominator(BBCopy, BBCopyIDom); |
1408 | 104 | |
1409 | 104 | return StartBlockMap.lookup(BBIDom); |
1410 | 104 | } |
1411 | | |
1412 | | // This determines whether an llvm::Value (defined in @p BB) is usable when
1413 | | // leaving a subregion. The straightforward DT.dominates(BB, R->getExitBlock())
1414 | | // check does not work in cases where the exit block has edges from outside
1415 | | // the region; in that case the llvm::Value would never be usable in the exit
1416 | | // block. The RegionGenerator, however, creates a new exit block ('ExitBBCopy')
1417 | | // for the subregion's exiting edges only. We need to determine whether an
1418 | | // llvm::Value is usable there, which we do by checking whether it dominates
1419 | | // all exiting blocks individually.
1420 | | static bool isDominatingSubregionExit(const DominatorTree &DT, Region *R, |
1421 | 104 | BasicBlock *BB) { |
1422 | 193 | for (auto ExitingBB : predecessors(R->getExit())) { |
1423 | 193 | // Check for non-subregion incoming edges. |
1424 | 193 | if (!R->contains(ExitingBB)) |
1425 | 27 | continue; |
1426 | 166 | |
1427 | 166 | if (!DT.dominates(BB, ExitingBB)) |
1428 | 60 | return false; |
1429 | 166 | } |
1430 | 104 | |
1431 | 104 | return true;
1432 | 104 | } |
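
The same check, restated stand-alone so it can be reasoned about without a DominatorTree at hand; the callback stands in for DT.dominates and all names are invented.

#include <functional>
#include <vector>

struct Block {};
using DomFn = std::function<bool(const Block *, const Block *)>;

// A value defined in BB survives into the region's private exit copy only
// if BB dominates every in-region exiting predecessor of the exit block;
// edges entering the exit from outside the region do not matter here.
bool dominatesAllExitingBlocks(
    const Block *BB, const std::vector<const Block *> &InRegionExiting,
    const DomFn &Dominates) {
  for (const Block *ExitingBB : InRegionExiting)
    if (!Dominates(BB, ExitingBB))
      return false;
  return true;
}
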
1433 | | |
1434 | | // Find the direct dominator of the subregion's exit block if the subregion was |
1435 | | // simplified. |
1436 | 36 | static BasicBlock *findExitDominator(DominatorTree &DT, Region *R) { |
1437 | 36 | BasicBlock *Common = nullptr; |
1438 | 74 | for (auto ExitingBB : predecessors(R->getExit())) { |
1439 | 74 | // Check for non-subregion incoming edges. |
1440 | 74 | if (!R->contains(ExitingBB)) |
1441 | 9 | continue; |
1442 | 65 | |
1443 | 65 | // First exiting edge. |
1444 | 65 | if (!Common) { |
1445 | 36 | Common = ExitingBB; |
1446 | 36 | continue; |
1447 | 36 | } |
1448 | 29 | |
1449 | 29 | Common = DT.findNearestCommonDominator(Common, ExitingBB); |
1450 | 29 | } |
1451 | 36 | |
1452 | 36 | assert(Common && R->contains(Common)); |
1453 | 36 | return Common; |
1454 | 36 | } |
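
The pairwise fold over exiting blocks generalizes to any block list. A sketch against the real llvm::DominatorTree API (findNearestCommonDominator does exist with this signature); the wrapper itself is invented.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Dominators.h"

// Reduce a list of blocks to their nearest common dominator, the same
// fold findExitDominator performs over the subregion's exiting blocks.
llvm::BasicBlock *commonDominator(llvm::DominatorTree &DT,
                                  llvm::ArrayRef<llvm::BasicBlock *> Blocks) {
  llvm::BasicBlock *Common = nullptr;
  for (llvm::BasicBlock *BB : Blocks)
    Common = Common ? DT.findNearestCommonDominator(Common, BB) : BB;
  return Common;
}
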
1455 | | |
1456 | | void RegionGenerator::copyStmt(ScopStmt &Stmt, LoopToScevMapT &LTS,
1457 | 36 | isl_id_to_ast_expr *IdToAstExp) { |
1458 | 36 | assert(Stmt.isRegionStmt() && |
1459 | 36 | "Only region statements can be copied by the region generator"); |
1460 | 36 | |
1461 | 36 | // Forget all old mappings. |
1462 | 36 | StartBlockMap.clear(); |
1463 | 36 | EndBlockMap.clear(); |
1464 | 36 | RegionMaps.clear(); |
1465 | 36 | IncompletePHINodeMap.clear(); |
1466 | 36 | |
1467 | 36 | // Collection of all values related to this subregion. |
1468 | 36 | ValueMapT ValueMap; |
1469 | 36 | |
1470 | 36 | // The region represented by the statement. |
1471 | 36 | Region *R = Stmt.getRegion(); |
1472 | 36 | |
1473 | 36 | // Create a dedicated entry for the region where we can reload all demoted |
1474 | 36 | // inputs. |
1475 | 36 | BasicBlock *EntryBB = R->getEntry(); |
1476 | 36 | BasicBlock *EntryBBCopy = SplitBlock(Builder.GetInsertBlock(), |
1477 | 36 | &*Builder.GetInsertPoint(), &DT, &LI); |
1478 | 36 | EntryBBCopy->setName("polly.stmt." + EntryBB->getName() + ".entry"); |
1479 | 36 | Builder.SetInsertPoint(&EntryBBCopy->front()); |
1480 | 36 | |
1481 | 36 | ValueMapT &EntryBBMap = RegionMaps[EntryBBCopy]; |
1482 | 36 | generateScalarLoads(Stmt, LTS, EntryBBMap, IdToAstExp); |
1483 | 36 | generateBeginStmtTrace(Stmt, LTS, EntryBBMap); |
1484 | 36 | |
1485 | 88 | for (auto PI = pred_begin(EntryBB), PE = pred_end(EntryBB); PI != PE; ++PI)
1486 | 52 | if (!R->contains(*PI)) { |
1487 | 50 | StartBlockMap[*PI] = EntryBBCopy; |
1488 | 50 | EndBlockMap[*PI] = EntryBBCopy; |
1489 | 50 | } |
1490 | 36 | |
1491 | 36 | // Iterate over all blocks in the region in a breadth-first search. |
1492 | 36 | std::deque<BasicBlock *> Blocks; |
1493 | 36 | SmallSetVector<BasicBlock *, 8> SeenBlocks; |
1494 | 36 | Blocks.push_back(EntryBB); |
1495 | 36 | SeenBlocks.insert(EntryBB); |
1496 | 36 | |
1497 | 140 | while (!Blocks.empty()) { |
1498 | 104 | BasicBlock *BB = Blocks.front(); |
1499 | 104 | Blocks.pop_front(); |
1500 | 104 | |
1501 | 104 | // First split the block and update dominance information. |
1502 | 104 | BasicBlock *BBCopy = splitBB(BB); |
1503 | 104 | BasicBlock *BBCopyIDom = repairDominance(BB, BBCopy); |
1504 | 104 | |
1505 | 104 | // Get the mapping for this block and initialize it with either the scalar |
1506 | 104 | // loads from the generated entering block (which dominates all blocks of |
1507 | 104 | // this subregion) or the maps of the immediate dominator, if part of the |
1508 | 104 | // subregion. The latter necessarily includes the former. |
1509 | 104 | ValueMapT *InitBBMap; |
1510 | 104 | if (BBCopyIDom) { |
1511 | 103 | assert(RegionMaps.count(BBCopyIDom)); |
1512 | 103 | InitBBMap = &RegionMaps[BBCopyIDom]; |
1513 | 103 | } else |
1514 | 1 | InitBBMap = &EntryBBMap; |
1515 | 104 | auto Inserted = RegionMaps.insert(std::make_pair(BBCopy, *InitBBMap)); |
1516 | 104 | ValueMapT &RegionMap = Inserted.first->second; |
1517 | 104 | |
1518 | 104 | // Copy the block with the BlockGenerator. |
1519 | 104 | Builder.SetInsertPoint(&BBCopy->front()); |
1520 | 104 | copyBB(Stmt, BB, BBCopy, RegionMap, LTS, IdToAstExp); |
1521 | 104 | |
1522 | 104 | // In order to remap PHI nodes we also store basic block mappings.
1523 | 104 | StartBlockMap[BB] = BBCopy; |
1524 | 104 | EndBlockMap[BB] = Builder.GetInsertBlock(); |
1525 | 104 | |
1526 | 104 | // Add values to incomplete PHI nodes waiting for this block to be copied. |
1527 | 104 | for (const PHINodePairTy &PHINodePair : IncompletePHINodeMap[BB]) |
1528 | 3 | addOperandToPHI(Stmt, PHINodePair.first, PHINodePair.second, BB, LTS); |
1529 | 104 | IncompletePHINodeMap[BB].clear(); |
1530 | 104 | |
1531 | 104 | // And continue with new successors inside the region. |
1532 | 251 | for (auto SI = succ_begin(BB), SE = succ_end(BB); SI != SE; SI++)
1533 | 147 | if (R->contains(*SI) && SeenBlocks.insert(*SI))
1534 | 68 | Blocks.push_back(*SI); |
1535 | 104 | |
1536 | 104 | // Remember the values in case they are visible after this subregion.
1537 | 104 | if (isDominatingSubregionExit(DT, R, BB)) |
1538 | 44 | ValueMap.insert(RegionMap.begin(), RegionMap.end()); |
1539 | 104 | } |
1540 | 36 | |
1541 | 36 | // Now create a new dedicated region exit block and add it to the region map. |
1542 | 36 | BasicBlock *ExitBBCopy = SplitBlock(Builder.GetInsertBlock(), |
1543 | 36 | &*Builder.GetInsertPoint(), &DT, &LI); |
1544 | 36 | ExitBBCopy->setName("polly.stmt." + R->getExit()->getName() + ".exit"); |
1545 | 36 | StartBlockMap[R->getExit()] = ExitBBCopy; |
1546 | 36 | EndBlockMap[R->getExit()] = ExitBBCopy; |
1547 | 36 | |
1548 | 36 | BasicBlock *ExitDomBBCopy = EndBlockMap.lookup(findExitDominator(DT, R)); |
1549 | 36 | assert(ExitDomBBCopy && |
1550 | 36 | "Common exit dominator must be within region; at least the entry node " |
1551 | 36 | "must match"); |
1552 | 36 | DT.changeImmediateDominator(ExitBBCopy, ExitDomBBCopy); |
1553 | 36 | |
1554 | 36 | // As the block generator does not handle control flow, we need to add the
1555 | 36 | // region's control flow by hand after all blocks have been copied.
1556 | 104 | for (BasicBlock *BB : SeenBlocks) { |
1557 | 104 | |
1558 | 104 | BasicBlock *BBCopyStart = StartBlockMap[BB]; |
1559 | 104 | BasicBlock *BBCopyEnd = EndBlockMap[BB]; |
1560 | 104 | Instruction *TI = BB->getTerminator(); |
1561 | 104 | if (isa<UnreachableInst>(TI)) { |
1562 | 0 | while (!BBCopyEnd->empty()) |
1563 | 0 | BBCopyEnd->begin()->eraseFromParent(); |
1564 | 0 | new UnreachableInst(BBCopyEnd->getContext(), BBCopyEnd); |
1565 | 0 | continue; |
1566 | 0 | } |
1567 | 104 | |
1568 | 104 | Instruction *BICopy = BBCopyEnd->getTerminator(); |
1569 | 104 | |
1570 | 104 | ValueMapT &RegionMap = RegionMaps[BBCopyStart]; |
1571 | 104 | RegionMap.insert(StartBlockMap.begin(), StartBlockMap.end()); |
1572 | 104 | |
1573 | 104 | Builder.SetInsertPoint(BICopy); |
1574 | 104 | copyInstScalar(Stmt, TI, RegionMap, LTS); |
1575 | 104 | BICopy->eraseFromParent(); |
1576 | 104 | } |
1577 | 36 | |
1578 | 36 | // Add counting PHI nodes to all loops in the region; they can be used as
1579 | 36 | // replacements for SCEVs referring to the old loop.
1580 | 104 | for (BasicBlock *BB : SeenBlocks) { |
1581 | 104 | Loop *L = LI.getLoopFor(BB); |
1582 | 104 | if (L == nullptr || L->getHeader() != BB || !R->contains(L))
1583 | 102 | continue; |
1584 | 2 | |
1585 | 2 | BasicBlock *BBCopy = StartBlockMap[BB]; |
1586 | 2 | Value *NullVal = Builder.getInt32(0); |
1587 | 2 | PHINode *LoopPHI = |
1588 | 2 | PHINode::Create(Builder.getInt32Ty(), 2, "polly.subregion.iv"); |
1589 | 2 | Instruction *LoopPHIInc = BinaryOperator::CreateAdd( |
1590 | 2 | LoopPHI, Builder.getInt32(1), "polly.subregion.iv.inc"); |
1591 | 2 | LoopPHI->insertBefore(&BBCopy->front()); |
1592 | 2 | LoopPHIInc->insertBefore(BBCopy->getTerminator()); |
1593 | 2 | |
1594 | 4 | for (auto *PredBB : make_range(pred_begin(BB), pred_end(BB))) { |
1595 | 4 | if (!R->contains(PredBB)) |
1596 | 2 | continue; |
1597 | 2 | if (L->contains(PredBB)) |
1598 | 2 | LoopPHI->addIncoming(LoopPHIInc, EndBlockMap[PredBB]); |
1599 | 0 | else |
1600 | 0 | LoopPHI->addIncoming(NullVal, EndBlockMap[PredBB]); |
1601 | 2 | } |
1602 | 2 | |
1603 | 2 | for (auto *PredBBCopy : make_range(pred_begin(BBCopy), pred_end(BBCopy))) |
1604 | 4 | if (LoopPHI->getBasicBlockIndex(PredBBCopy) < 0) |
1605 | 2 | LoopPHI->addIncoming(NullVal, PredBBCopy); |
1606 | 2 | |
1607 | 2 | LTS[L] = SE.getUnknown(LoopPHI); |
1608 | 2 | } |
1609 | 36 | |
1610 | 36 | // Continue generating code in the exit block. |
1611 | 36 | Builder.SetInsertPoint(&*ExitBBCopy->getFirstInsertionPt()); |
1612 | 36 | |
1613 | 36 | // Write values visible to other statements. |
1614 | 36 | generateScalarStores(Stmt, LTS, ValueMap, IdToAstExp); |
1615 | 36 | StartBlockMap.clear(); |
1616 | 36 | EndBlockMap.clear(); |
1617 | 36 | RegionMaps.clear(); |
1618 | 36 | IncompletePHINodeMap.clear(); |
1619 | 36 | } |
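
Stripped of the code generation itself, the traversal above is a standard FIFO worklist with a seen-set, visiting each in-region block once in breadth-first order. A generic sketch with invented types:

#include <deque>
#include <functional>
#include <set>
#include <vector>

struct Node {
  std::vector<Node *> Succs;
  bool InRegion = true;
};

// Visit every in-region node reachable from Entry exactly once, in BFS
// order, mirroring the Blocks/SeenBlocks loop in RegionGenerator::copyStmt.
void bfsVisit(Node *Entry, const std::function<void(Node *)> &CopyBlock) {
  std::deque<Node *> Worklist{Entry};
  std::set<Node *> Seen{Entry};
  while (!Worklist.empty()) {
    Node *BB = Worklist.front();
    Worklist.pop_front();
    CopyBlock(BB);
    for (Node *Succ : BB->Succs)
      if (Succ->InRegion && Seen.insert(Succ).second)
        Worklist.push_back(Succ);
  }
}
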
1620 | | |
1621 | | PHINode *RegionGenerator::buildExitPHI(MemoryAccess *MA, LoopToScevMapT &LTS,
1622 | 13 | ValueMapT &BBMap, Loop *L) { |
1623 | 13 | ScopStmt *Stmt = MA->getStatement(); |
1624 | 13 | Region *SubR = Stmt->getRegion(); |
1625 | 13 | auto Incoming = MA->getIncoming(); |
1626 | 13 | |
1627 | 13 | PollyIRBuilder::InsertPointGuard IPGuard(Builder); |
1628 | 13 | PHINode *OrigPHI = cast<PHINode>(MA->getAccessInstruction()); |
1629 | 13 | BasicBlock *NewSubregionExit = Builder.GetInsertBlock(); |
1630 | 13 | |
1631 | 13 | // This can happen if the subregion is simplified after the ScopStmts |
1632 | 13 | // have been created; simplification happens as part of CodeGeneration. |
1633 | 13 | if (OrigPHI->getParent() != SubR->getExit()) { |
1634 | 6 | BasicBlock *FormerExit = SubR->getExitingBlock(); |
1635 | 6 | if (FormerExit) |
1636 | 4 | NewSubregionExit = StartBlockMap.lookup(FormerExit); |
1637 | 6 | } |
1638 | 13 | |
1639 | 13 | PHINode *NewPHI = PHINode::Create(OrigPHI->getType(), Incoming.size(), |
1640 | 13 | "polly." + OrigPHI->getName(), |
1641 | 13 | NewSubregionExit->getFirstNonPHI()); |
1642 | 13 | |
1643 | 13 | // Add the incoming values to the PHI. |
1644 | 28 | for (auto &Pair : Incoming) { |
1645 | 28 | BasicBlock *OrigIncomingBlock = Pair.first; |
1646 | 28 | BasicBlock *NewIncomingBlockStart = StartBlockMap.lookup(OrigIncomingBlock); |
1647 | 28 | BasicBlock *NewIncomingBlockEnd = EndBlockMap.lookup(OrigIncomingBlock); |
1648 | 28 | Builder.SetInsertPoint(NewIncomingBlockEnd->getTerminator()); |
1649 | 28 | assert(RegionMaps.count(NewIncomingBlockStart)); |
1650 | 28 | assert(RegionMaps.count(NewIncomingBlockEnd)); |
1651 | 28 | ValueMapT *LocalBBMap = &RegionMaps[NewIncomingBlockStart]; |
1652 | 28 | |
1653 | 28 | Value *OrigIncomingValue = Pair.second; |
1654 | 28 | Value *NewIncomingValue = |
1655 | 28 | getNewValue(*Stmt, OrigIncomingValue, *LocalBBMap, LTS, L); |
1656 | 28 | NewPHI->addIncoming(NewIncomingValue, NewIncomingBlockEnd); |
1657 | 28 | } |
1658 | 13 | |
1659 | 13 | return NewPHI; |
1660 | 13 | } |
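
The PHI wiring above reduces to two llvm::PHINode calls. In isolation (real PHINode::Create / addIncoming API; the helper and its parameters are invented):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"
#include <utility>

// Build a PHI at the start of Exit that merges one value per incoming
// block, as buildExitPHI does for values leaving a subregion.
llvm::PHINode *mergeAtExit(
    llvm::BasicBlock *Exit,
    llvm::ArrayRef<std::pair<llvm::BasicBlock *, llvm::Value *>> Incoming,
    llvm::Type *Ty, const llvm::Twine &Name) {
  llvm::PHINode *PHI = llvm::PHINode::Create(Ty, Incoming.size(), Name,
                                             Exit->getFirstNonPHI());
  for (const auto &Pair : Incoming)
    PHI->addIncoming(Pair.second, Pair.first);
  return PHI;
}
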
1661 | | |
1662 | | Value *RegionGenerator::getExitScalar(MemoryAccess *MA, LoopToScevMapT &LTS,
1663 | 20 | ValueMapT &BBMap) { |
1664 | 20 | ScopStmt *Stmt = MA->getStatement(); |
1665 | 20 | |
1666 | 20 | // TODO: Add some test cases that ensure this is really the right choice. |
1667 | 20 | Loop *L = LI.getLoopFor(Stmt->getRegion()->getExit()); |
1668 | 20 | |
1669 | 20 | if (MA->isAnyPHIKind()) { |
1670 | 13 | auto Incoming = MA->getIncoming(); |
1671 | 13 | assert(!Incoming.empty() && |
1672 | 13 | "PHI WRITEs must originate from at least one incoming block");
1673 | 13 | |
1674 | 13 | // If there is only one incoming value, we do not need to create a PHI. |
1675 | 13 | if (Incoming.size() == 1) { |
1676 | 0 | Value *OldVal = Incoming[0].second; |
1677 | 0 | return getNewValue(*Stmt, OldVal, BBMap, LTS, L); |
1678 | 0 | } |
1679 | 13 | |
1680 | 13 | return buildExitPHI(MA, LTS, BBMap, L); |
1681 | 13 | } |
1682 | 7 | |
1683 | 7 | // MemoryKind::Value accesses leaving the subregion must dominate the exit |
1684 | 7 | // block; just pass the copied value. |
1685 | 7 | Value *OldVal = MA->getAccessValue(); |
1686 | 7 | return getNewValue(*Stmt, OldVal, BBMap, LTS, L); |
1687 | 7 | } |
1688 | | |
1689 | | void RegionGenerator::generateScalarStores( |
1690 | | ScopStmt &Stmt, LoopToScevMapT &LTS, ValueMapT &BBMap,
1691 | 36 | __isl_keep isl_id_to_ast_expr *NewAccesses) { |
1692 | 36 | assert(Stmt.getRegion() && |
1693 | 36 | "Block statements need to use the generateScalarStores() " |
1694 | 36 | "function in the BlockGenerator"); |
1695 | 36 | |
1696 | 36 | // Get the exit scalar values before generating the writes. |
1697 | 36 | // This is necessary because RegionGenerator::getExitScalar may insert |
1698 | 36 | // PHINodes that depend on the region's exiting blocks. But |
1699 | 36 | // BlockGenerator::generateConditionalExecution may insert a new basic block |
1700 | 36 | // such that the current basic block is not a direct successor of the exiting |
1701 | 36 | // blocks anymore. Hence, build the PHINodes while the current block is still |
1702 | 36 | // the direct successor. |
1703 | 36 | SmallDenseMap<MemoryAccess *, Value *> NewExitScalars; |
1704 | 98 | for (MemoryAccess *MA : Stmt) { |
1705 | 98 | if (MA->isOriginalArrayKind() || MA->isRead()30 ) |
1706 | 78 | continue; |
1707 | 20 | |
1708 | 20 | Value *NewVal = getExitScalar(MA, LTS, BBMap); |
1709 | 20 | NewExitScalars[MA] = NewVal; |
1710 | 20 | } |
1711 | 36 | |
1712 | 98 | for (MemoryAccess *MA : Stmt) { |
1713 | 98 | if (MA->isOriginalArrayKind() || MA->isRead())
1714 | 78 | continue; |
1715 | 20 | |
1716 | 20 | isl::set AccDom = MA->getAccessRelation().domain(); |
1717 | 20 | std::string Subject = MA->getId().get_name(); |
1718 | 20 | generateConditionalExecution( |
1719 | 20 | Stmt, AccDom, Subject.c_str(), [&, this, MA]() { |
1720 | 20 | Value *NewVal = NewExitScalars.lookup(MA); |
1721 | 20 | assert(NewVal && "The exit scalar must be determined before"); |
1722 | 20 | Value *Address = getImplicitAddress(*MA, getLoopForStmt(Stmt), LTS, |
1723 | 20 | BBMap, NewAccesses); |
1724 | 20 | assert((!isa<Instruction>(NewVal) || |
1725 | 20 | DT.dominates(cast<Instruction>(NewVal)->getParent(), |
1726 | 20 | Builder.GetInsertBlock())) && |
1727 | 20 | "Domination violation"); |
1728 | 20 | assert((!isa<Instruction>(Address) || |
1729 | 20 | DT.dominates(cast<Instruction>(Address)->getParent(), |
1730 | 20 | Builder.GetInsertBlock())) && |
1731 | 20 | "Domination violation"); |
1732 | 20 | Builder.CreateStore(NewVal, Address); |
1733 | 20 | }); |
1734 | 20 | } |
1735 | 36 | } |
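
The two-pass structure above is the point: exit values are computed while the exiting blocks are still direct predecessors, and only afterwards are the possibly conditional stores emitted, since emitting them may restructure the CFG. A shape-only sketch with invented types:

#include <functional>
#include <map>
#include <vector>

struct Access {}; // stand-in for polly::MemoryAccess
struct Value {};  // stand-in for llvm::Value

// Pass 1 computes every exit value up front; pass 2 may split blocks and
// add branches without invalidating the values computed in pass 1.
void twoPassScalarStores(
    const std::vector<Access *> &Writes,
    const std::function<Value *(Access *)> &ComputeExitValue,
    const std::function<void(Access *, Value *)> &EmitConditionalStore) {
  std::map<Access *, Value *> ExitValues;
  for (Access *MA : Writes)
    ExitValues[MA] = ComputeExitValue(MA); // CFG still intact here
  for (Access *MA : Writes)
    EmitConditionalStore(MA, ExitValues[MA]); // may restructure the CFG
}
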
1736 | | |
1737 | | void RegionGenerator::addOperandToPHI(ScopStmt &Stmt, PHINode *PHI, |
1738 | | PHINode *PHICopy, BasicBlock *IncomingBB, |
1739 | 15 |                                       LoopToScevMapT &LTS) {
1740 | 15 | // If the incoming block has not yet been copied, mark this PHI as
1741 | 15 | // incomplete; once the block is copied, the incoming value will be added.
1742 | 15 | BasicBlock *BBCopyStart = StartBlockMap[IncomingBB]; |
1743 | 15 | BasicBlock *BBCopyEnd = EndBlockMap[IncomingBB]; |
1744 | 15 | if (!BBCopyStart) { |
1745 | 3 | assert(!BBCopyEnd); |
1746 | 3 | assert(Stmt.represents(IncomingBB) && |
1747 | 3 | "Bad incoming block for PHI in non-affine region"); |
1748 | 3 | IncompletePHINodeMap[IncomingBB].push_back(std::make_pair(PHI, PHICopy)); |
1749 | 3 | return; |
1750 | 3 | } |
1751 | 12 | |
1752 | 12 | assert(RegionMaps.count(BBCopyStart) && |
1753 | 12 | "Incoming PHI block did not have a BBMap"); |
1754 | 12 | ValueMapT &BBCopyMap = RegionMaps[BBCopyStart]; |
1755 | 12 | |
1756 | 12 | Value *OpCopy = nullptr; |
1757 | 12 | |
1758 | 12 | if (Stmt.represents(IncomingBB)) { |
1759 | 6 | Value *Op = PHI->getIncomingValueForBlock(IncomingBB); |
1760 | 6 | |
1761 | 6 | // If the current insert block differs from the PHI's incoming block,
1762 | 6 | // change it; otherwise do not.
1763 | 6 | auto IP = Builder.GetInsertPoint(); |
1764 | 6 | if (IP->getParent() != BBCopyEnd) |
1765 | 3 | Builder.SetInsertPoint(BBCopyEnd->getTerminator()); |
1766 | 6 | OpCopy = getNewValue(Stmt, Op, BBCopyMap, LTS, getLoopForStmt(Stmt)); |
1767 | 6 | if (IP->getParent() != BBCopyEnd) |
1768 | 3 | Builder.SetInsertPoint(&*IP); |
1769 | 6 | } else { |
1770 | 6 | // All edges from outside the non-affine region become a single edge |
1771 | 6 | // in the new copy of the non-affine region. Make sure to only add the |
1772 | 6 | // corresponding edge the first time we encounter a basic block from |
1773 | 6 | // outside the non-affine region. |
1774 | 6 | if (PHICopy->getBasicBlockIndex(BBCopyEnd) >= 0) |
1775 | 1 | return; |
1776 | 5 | |
1777 | 5 | // Get the reloaded value. |
1778 | 5 | OpCopy = getNewValue(Stmt, PHI, BBCopyMap, LTS, getLoopForStmt(Stmt)); |
1779 | 5 | } |
1780 | 12 | |
1781 | 12 | assert(OpCopy && "Incoming PHI value was not copied properly"); |
1782 | 11 | PHICopy->addIncoming(OpCopy, BBCopyEnd); |
1783 | 11 | } |
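
The deferral used above in one place: when an incoming block has no copy yet, the (PHI, PHICopy) pair is parked and replayed once that block is copied. A minimal model of the bookkeeping, with invented types:

#include <map>
#include <utility>
#include <vector>

struct BB {};  // stand-in for llvm::BasicBlock
struct PHI {}; // stand-in for llvm::PHINode
using PHIPair = std::pair<PHI *, PHI *>; // (original, copy)

struct IncompletePHIs {
  std::map<BB *, std::vector<PHIPair>> Pending;

  // Incoming block not copied yet: park the pair for later.
  void defer(BB *IncomingBB, PHI *Orig, PHI *Copy) {
    Pending[IncomingBB].emplace_back(Orig, Copy);
  }

  // Right after IncomingBB has been copied: add the deferred incoming
  // values, then clear the queue, as RegionGenerator::copyStmt does.
  template <typename AddOperandFn>
  void resolve(BB *IncomingBB, AddOperandFn AddOperand) {
    for (const PHIPair &P : Pending[IncomingBB])
      AddOperand(P.first, P.second, IncomingBB);
    Pending[IncomingBB].clear();
  }
};
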
1784 | | |
1785 | | void RegionGenerator::copyPHIInstruction(ScopStmt &Stmt, PHINode *PHI, |
1786 | | ValueMapT &BBMap, |
1787 | 7 |                                          LoopToScevMapT &LTS) {
1788 | 7 | unsigned NumIncoming = PHI->getNumIncomingValues(); |
1789 | 7 | PHINode *PHICopy = |
1790 | 7 | Builder.CreatePHI(PHI->getType(), NumIncoming, "polly." + PHI->getName()); |
1791 | 7 | PHICopy->moveBefore(PHICopy->getParent()->getFirstNonPHI()); |
1792 | 7 | BBMap[PHI] = PHICopy; |
1793 | 7 | |
1794 | 7 | for (BasicBlock *IncomingBB : PHI->blocks()) |
1795 | 12 | addOperandToPHI(Stmt, PHI, PHICopy, IncomingBB, LTS); |
1796 | 7 | } |