/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
Line | Count | Source |
1 | | //===----------------------- AlignmentFromAssumptions.cpp -----------------===// |
2 | | // Set Load/Store Alignments From Assumptions |
3 | | // |
4 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
5 | | // See https://llvm.org/LICENSE.txt for license information. |
6 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
7 | | // |
8 | | //===----------------------------------------------------------------------===// |
9 | | // |
10 | | // This file implements a ScalarEvolution-based transformation to set |
11 | | // the alignments of loads, stores, and memory intrinsics based on the truth |
12 | | // expressions of assume intrinsics. The primary motivation is to handle |
13 | | // complex alignment assumptions that apply to vector loads and stores that |
14 | | // appear after vectorization and unrolling. |
15 | | // |
16 | | //===----------------------------------------------------------------------===// |
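// As an illustrative (hypothetical) example of the motivating pattern, a
// caller might write:
//
//   double sum(double *a) {
//     a = (double *) __builtin_assume_aligned(a, 32);
//     double r = 0.0;
//     for (int i = 0; i < 1024; ++i) r += a[i];
//     return r;
//   }
//
// The builtin lowers to an llvm.assume about the low bits of the pointer;
// after vectorization and unrolling, this pass can propagate the assumed
// 32-byte alignment to the resulting vector loads.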
17 | | |
18 | | #define AA_NAME "alignment-from-assumptions" |
19 | | #define DEBUG_TYPE AA_NAME |
20 | | #include "llvm/Transforms/Scalar/AlignmentFromAssumptions.h" |
21 | | #include "llvm/ADT/SmallPtrSet.h" |
22 | | #include "llvm/ADT/Statistic.h" |
23 | | #include "llvm/Analysis/AliasAnalysis.h" |
24 | | #include "llvm/Analysis/AssumptionCache.h" |
25 | | #include "llvm/Analysis/GlobalsModRef.h" |
26 | | #include "llvm/Analysis/LoopInfo.h" |
27 | | #include "llvm/Analysis/ScalarEvolutionExpressions.h" |
28 | | #include "llvm/Analysis/ValueTracking.h" |
29 | | #include "llvm/IR/Constant.h" |
30 | | #include "llvm/IR/Dominators.h" |
31 | | #include "llvm/IR/Instruction.h" |
32 | | #include "llvm/IR/Intrinsics.h" |
33 | | #include "llvm/IR/Module.h" |
34 | | #include "llvm/Support/Debug.h" |
35 | | #include "llvm/Support/raw_ostream.h" |
36 | | #include "llvm/Transforms/Scalar.h" |
37 | | using namespace llvm; |
38 | | |
39 | | STATISTIC(NumLoadAlignChanged, |
40 | | "Number of loads changed by alignment assumptions"); |
41 | | STATISTIC(NumStoreAlignChanged, |
42 | | "Number of stores changed by alignment assumptions"); |
43 | | STATISTIC(NumMemIntAlignChanged, |
44 | | "Number of memory intrinsics changed by alignment assumptions"); |
45 | | |
46 | | namespace { |
47 | | struct AlignmentFromAssumptions : public FunctionPass { |
48 | | static char ID; // Pass identification, replacement for typeid |
49 | 13.4k | AlignmentFromAssumptions() : FunctionPass(ID) { |
50 | 13.4k | initializeAlignmentFromAssumptionsPass(*PassRegistry::getPassRegistry()); |
51 | 13.4k | } |
52 | | |
53 | | bool runOnFunction(Function &F) override; |
54 | | |
55 | 13.4k | void getAnalysisUsage(AnalysisUsage &AU) const override { |
56 | 13.4k | AU.addRequired<AssumptionCacheTracker>(); |
57 | 13.4k | AU.addRequired<ScalarEvolutionWrapperPass>(); |
58 | 13.4k | AU.addRequired<DominatorTreeWrapperPass>(); |
59 | 13.4k | |
60 | 13.4k | AU.setPreservesCFG(); |
61 | 13.4k | AU.addPreserved<AAResultsWrapperPass>(); |
62 | 13.4k | AU.addPreserved<GlobalsAAWrapperPass>(); |
63 | 13.4k | AU.addPreserved<LoopInfoWrapperPass>(); |
64 | 13.4k | AU.addPreserved<DominatorTreeWrapperPass>(); |
65 | 13.4k | AU.addPreserved<ScalarEvolutionWrapperPass>(); |
66 | 13.4k | } |
67 | | |
68 | | AlignmentFromAssumptionsPass Impl; |
69 | | }; |
70 | | } |
71 | | |
72 | | char AlignmentFromAssumptions::ID = 0; |
73 | | static const char aip_name[] = "Alignment from assumptions"; |
74 | 48.9k | INITIALIZE_PASS_BEGIN(AlignmentFromAssumptions, AA_NAME, |
75 | 48.9k | aip_name, false, false) |
76 | 48.9k | INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) |
77 | 48.9k | INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) |
78 | 48.9k | INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) |
79 | 48.9k | INITIALIZE_PASS_END(AlignmentFromAssumptions, AA_NAME, |
80 | | aip_name, false, false) |
81 | | |
82 | 13.4k | FunctionPass *llvm::createAlignmentFromAssumptionsPass() { |
83 | 13.4k | return new AlignmentFromAssumptions(); |
84 | 13.4k | } |
85 | | |
86 | | // Given an expression for the (constant) alignment, AlignSCEV, and an |
87 | | // expression for the displacement between a pointer and the aligned address, |
88 | | // DiffSCEV, compute the alignment of the displaced pointer if it can be reduced |
89 | | // to a constant. Using SCEV to compute alignment handles the case where |
90 | | // DiffSCEV is a recurrence with constant start such that the aligned offset |
91 | | // is constant, e.g. {16,+,32} % 32 -> 16. |
92 | | static unsigned getNewAlignmentDiff(const SCEV *DiffSCEV, |
93 | | const SCEV *AlignSCEV, |
94 | 68 | ScalarEvolution *SE) { |
95 | 68 | // DiffUnits = Diff % int64_t(Alignment) |
96 | 68 | const SCEV *DiffAlignDiv = SE->getUDivExpr(DiffSCEV, AlignSCEV); |
97 | 68 | const SCEV *DiffAlign = SE->getMulExpr(DiffAlignDiv, AlignSCEV); |
98 | 68 | const SCEV *DiffUnitsSCEV = SE->getMinusSCEV(DiffAlign, DiffSCEV); |
99 | 68 | |
100 | 68 | LLVM_DEBUG(dbgs() << "\talignment relative to " << *AlignSCEV << " is " |
101 | 68 | << *DiffUnitsSCEV << " (diff: " << *DiffSCEV << ")\n"); |
102 | 68 | |
103 | 68 | if (const SCEVConstant *ConstDUSCEV = |
104 | 52 | dyn_cast<SCEVConstant>(DiffUnitsSCEV)) { |
105 | 52 | int64_t DiffUnits = ConstDUSCEV->getValue()->getSExtValue(); |
106 | 52 | |
107 | 52 | // If the displacement is an exact multiple of the alignment, then the |
108 | 52 | // displaced pointer has the same alignment as the aligned pointer, so |
109 | 52 | // return the alignment value. |
110 | 52 | if (!DiffUnits) |
111 | 32 | return (unsigned) |
112 | 32 | cast<SCEVConstant>(AlignSCEV)->getValue()->getSExtValue(); |
113 | 20 | |
114 | 20 | // If the displacement is not an exact multiple, but the remainder is a |
115 | 20 | // constant, then return this remainder (but only if it is a power of 2). |
116 | 20 | uint64_t DiffUnitsAbs = std::abs(DiffUnits); |
117 | 20 | if (isPowerOf2_64(DiffUnitsAbs)) |
118 | 20 | return (unsigned) DiffUnitsAbs; |
119 | 16 | } |
120 | 16 | |
121 | 16 | return 0; |
122 | 16 | } |
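// Worked example (illustrative): with DiffSCEV = 48 and AlignSCEV = 32,
// DiffAlignDiv = 48 /u 32 = 1, DiffAlign = 32, and DiffUnitsSCEV = 32 - 48 = -16.
// |-16| = 16 is a power of two, so a pointer displaced by 48 bytes from a
// 32-byte-aligned address is known to be 16-byte aligned.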
123 | | |
124 | | // There is an address given by an offset OffSCEV from AASCEV which has an |
125 | | // alignment AlignSCEV. Use that information, if possible, to compute a new |
126 | | // alignment for Ptr. |
127 | | static unsigned getNewAlignment(const SCEV *AASCEV, const SCEV *AlignSCEV, |
128 | | const SCEV *OffSCEV, Value *Ptr, |
129 | 52 | ScalarEvolution *SE) { |
130 | 52 | const SCEV *PtrSCEV = SE->getSCEV(Ptr); |
131 | 52 | const SCEV *DiffSCEV = SE->getMinusSCEV(PtrSCEV, AASCEV); |
132 | 52 | |
133 | 52 | // On 32-bit platforms, DiffSCEV might now have type i32 -- we've always |
134 | 52 | // sign-extended OffSCEV to i64, so make sure they agree again. |
135 | 52 | DiffSCEV = SE->getNoopOrSignExtend(DiffSCEV, OffSCEV->getType()); |
136 | 52 | |
137 | 52 | // What we really want to know is the overall offset to the aligned |
138 | 52 | // address. This address is displaced by the provided offset. |
139 | 52 | DiffSCEV = SE->getMinusSCEV(DiffSCEV, OffSCEV); |
140 | 52 | |
141 | 52 | LLVM_DEBUG(dbgs() << "AFI: alignment of " << *Ptr << " relative to " |
142 | 52 | << *AlignSCEV << " and offset " << *OffSCEV |
143 | 52 | << " using diff " << *DiffSCEV << "\n"); |
144 | 52 | |
145 | 52 | unsigned NewAlignment = getNewAlignmentDiff(DiffSCEV, AlignSCEV, SE); |
146 | 52 | LLVM_DEBUG(dbgs() << "\tnew alignment: " << NewAlignment << "\n"); |
147 | 52 | |
148 | 52 | if (NewAlignment) { |
149 | 36 | return NewAlignment; |
150 | 36 | } else if (const SCEVAddRecExpr *DiffARSCEV = |
151 | 8 | dyn_cast<SCEVAddRecExpr>(DiffSCEV)) { |
152 | 8 | // The relative offset to the alignment assumption did not yield a constant, |
153 | 8 | // but we should try harder: if we assume that a is 32-byte aligned, then in |
154 | 8 | // for (i = 0; i < 1024; i += 4) r += a[i]; not all of the loads from a are |
155 | 8 | // 32-byte aligned; instead they alternate between 32-byte and 16-byte alignment. |
156 | 8 | // As a result, the new alignment will not be a constant, but can still |
157 | 8 | // be improved over the default (of 4) to 16. |
158 | 8 | |
159 | 8 | const SCEV *DiffStartSCEV = DiffARSCEV->getStart(); |
160 | 8 | const SCEV *DiffIncSCEV = DiffARSCEV->getStepRecurrence(*SE); |
161 | 8 | |
162 | 8 | LLVM_DEBUG(dbgs() << "\ttrying start/inc alignment using start " |
163 | 8 | << *DiffStartSCEV << " and inc " << *DiffIncSCEV << "\n"); |
164 | 8 | |
165 | 8 | // Now compute the new alignment using the displacement to the value in the |
166 | 8 | // first iteration, and also the alignment using the per-iteration delta. |
167 | 8 | // If these are the same, then use that answer. Otherwise, use the smaller |
168 | 8 | // one, but only if it divides the larger one. |
169 | 8 | NewAlignment = getNewAlignmentDiff(DiffStartSCEV, AlignSCEV, SE); |
170 | 8 | unsigned NewIncAlignment = getNewAlignmentDiff(DiffIncSCEV, AlignSCEV, SE); |
171 | 8 | |
172 | 8 | LLVM_DEBUG(dbgs() << "\tnew start alignment: " << NewAlignment << "\n"); |
173 | 8 | LLVM_DEBUG(dbgs() << "\tnew inc alignment: " << NewIncAlignment << "\n"); |
174 | 8 | |
175 | 8 | if (!NewAlignment || !NewIncAlignment) { |
176 | 0 | return 0; |
177 | 8 | } else if (NewAlignment > NewIncAlignment) { |
178 | 4 | if (NewAlignment % NewIncAlignment == 0) { |
179 | 4 | LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " << NewIncAlignment |
180 | 4 | << "\n"); |
181 | 4 | return NewIncAlignment; |
182 | 4 | } |
183 | 4 | } else if (NewIncAlignment > NewAlignment) { |
184 | 0 | if (NewIncAlignment % NewAlignment == 0) { |
185 | 0 | LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " << NewAlignment |
186 | 0 | << "\n"); |
187 | 0 | return NewAlignment; |
188 | 0 | } |
189 | 4 | } else if (NewIncAlignment == NewAlignment) { |
190 | 4 | LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " << NewAlignment |
191 | 4 | << "\n"); |
192 | 4 | return NewAlignment; |
193 | 4 | } |
194 | 8 | } |
195 | 8 | |
196 | 8 | return 0; |
197 | 8 | } |
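// Worked example (illustrative): if the assumed alignment is 32 and DiffSCEV
// is the recurrence {0,+,16}, the start alignment is 32 and the per-iteration
// (inc) alignment is 16; since 32 is a multiple of 16, every access described
// by the recurrence can safely be marked 16-byte aligned.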
198 | | |
199 | | bool AlignmentFromAssumptionsPass::extractAlignmentInfo(CallInst *I, |
200 | | Value *&AAPtr, |
201 | | const SCEV *&AlignSCEV, |
202 | 74 | const SCEV *&OffSCEV) { |
203 | 74 | // An alignment assume must be a statement about the least-significant |
204 | 74 | // bits of the pointer being zero, possibly with some offset. |
205 | 74 | ICmpInst *ICI = dyn_cast<ICmpInst>(I->getArgOperand(0)); |
206 | 74 | if (!ICI) |
207 | 4 | return false; |
208 | 70 | |
209 | 70 | // This must be an expression of the form: x & m == 0. |
210 | 70 | if (ICI->getPredicate() != ICmpInst::ICMP_EQ) |
211 | 8 | return false; |
212 | 62 | |
213 | 62 | // Swap things around so that the RHS is 0. |
214 | 62 | Value *CmpLHS = ICI->getOperand(0); |
215 | 62 | Value *CmpRHS = ICI->getOperand(1); |
216 | 62 | const SCEV *CmpLHSSCEV = SE->getSCEV(CmpLHS); |
217 | 62 | const SCEV *CmpRHSSCEV = SE->getSCEV(CmpRHS); |
218 | 62 | if (CmpLHSSCEV->isZero()) |
219 | 0 | std::swap(CmpLHS, CmpRHS); |
220 | 62 | else if (!CmpRHSSCEV->isZero()) |
221 | 15 | return false; |
222 | 47 | |
223 | 47 | BinaryOperator *CmpBO = dyn_cast<BinaryOperator>(CmpLHS); |
224 | 46 | if (!CmpBO || CmpBO->getOpcode() != Instruction::And) |
225 | 1 | return false; |
226 | 46 | |
227 | 46 | // Swap things around so that the right operand of the and is a constant |
228 | 46 | // (the mask); we cannot deal with variable masks. |
229 | 46 | Value *AndLHS = CmpBO->getOperand(0); |
230 | 46 | Value *AndRHS = CmpBO->getOperand(1); |
231 | 46 | const SCEV *AndLHSSCEV = SE->getSCEV(AndLHS); |
232 | 46 | const SCEV *AndRHSSCEV = SE->getSCEV(AndRHS); |
233 | 46 | if (isa<SCEVConstant>(AndLHSSCEV)) { |
234 | 0 | std::swap(AndLHS, AndRHS); |
235 | 0 | std::swap(AndLHSSCEV, AndRHSSCEV); |
236 | 0 | } |
237 | 46 | |
238 | 46 | const SCEVConstant *MaskSCEV = dyn_cast<SCEVConstant>(AndRHSSCEV); |
239 | 46 | if (!MaskSCEV) |
240 | 0 | return false; |
241 | 46 | |
242 | 46 | // The mask must have some trailing ones (otherwise the condition is |
243 | 46 | // trivial and tells us nothing about the alignment of the left operand). |
244 | 46 | unsigned TrailingOnes = MaskSCEV->getAPInt().countTrailingOnes(); |
245 | 46 | if (!TrailingOnes) |
246 | 0 | return false; |
247 | 46 | |
248 | 46 | // Cap the alignment at the maximum with which LLVM can deal (and make sure |
249 | 46 | // we don't overflow the shift). |
250 | 46 | uint64_t Alignment; |
251 | 46 | TrailingOnes = std::min(TrailingOnes, |
252 | 46 | unsigned(sizeof(unsigned) * CHAR_BIT - 1)); |
253 | 46 | Alignment = std::min(1u << TrailingOnes, +Value::MaximumAlignment); |
254 | 46 | |
255 | 46 | Type *Int64Ty = Type::getInt64Ty(I->getParent()->getParent()->getContext()); |
256 | 46 | AlignSCEV = SE->getConstant(Int64Ty, Alignment); |
257 | 46 | |
258 | 46 | // The LHS might be a ptrtoint instruction, or it might be the pointer |
259 | 46 | // with an offset. |
260 | 46 | AAPtr = nullptr; |
261 | 46 | OffSCEV = nullptr; |
262 | 46 | if (PtrToIntInst *PToI = dyn_cast<PtrToIntInst>(AndLHS)) { |
263 | 38 | AAPtr = PToI->getPointerOperand(); |
264 | 38 | OffSCEV = SE->getZero(Int64Ty); |
265 | 8 | } else if (const SCEVAddExpr *AndLHSAddSCEV = |
266 | 8 | dyn_cast<SCEVAddExpr>(AndLHSSCEV)) { |
267 | 8 | // Try to find the ptrtoint; subtract it and the rest is the offset. |
268 | 8 | for (SCEVAddExpr::op_iterator J = AndLHSAddSCEV->op_begin(), |
269 | 16 | JE = AndLHSAddSCEV->op_end(); J != JE; ++J) |
270 | 16 | if (const SCEVUnknown *OpUnk = dyn_cast<SCEVUnknown>(*J)) |
271 | 8 | if (PtrToIntInst *PToI = dyn_cast<PtrToIntInst>(OpUnk->getValue())) { |
272 | 8 | AAPtr = PToI->getPointerOperand(); |
273 | 8 | OffSCEV = SE->getMinusSCEV(AndLHSAddSCEV, *J); |
274 | 8 | break; |
275 | 8 | } |
276 | 8 | } |
277 | 46 | |
278 | 46 | if (!AAPtr) |
279 | 0 | return false; |
280 | 46 | |
281 | 46 | // Sign extend the offset to 64 bits (so that it is like all of the other |
282 | 46 | // expressions). |
283 | 46 | unsigned OffSCEVBits = OffSCEV->getType()->getPrimitiveSizeInBits(); |
284 | 46 | if (OffSCEVBits < 64) |
285 | 0 | OffSCEV = SE->getSignExtendExpr(OffSCEV, Int64Ty); |
286 | 46 | else if (OffSCEVBits > 64) |
287 | 0 | return false; |
288 | 46 | |
289 | 46 | AAPtr = AAPtr->stripPointerCasts(); |
290 | 46 | return true; |
291 | 46 | } |
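// For reference, a sketch of the IR shape this routine expects (value names
// are hypothetical); it is the form __builtin_assume_aligned(p, 32) has
// traditionally lowered to:
//
//   %ptrint    = ptrtoint double* %p to i64
//   %maskedptr = and i64 %ptrint, 31
//   %maskcond  = icmp eq i64 %maskedptr, 0
//   call void @llvm.assume(i1 %maskcond)
//
// from which it recovers AAPtr = %p, AlignSCEV = 32, and OffSCEV = 0.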
292 | | |
293 | 74 | bool AlignmentFromAssumptionsPass::processAssumption(CallInst *ACall) { |
294 | 74 | Value *AAPtr; |
295 | 74 | const SCEV *AlignSCEV, *OffSCEV; |
296 | 74 | if (!extractAlignmentInfo(ACall, AAPtr, AlignSCEV, OffSCEV)) |
297 | 28 | return false; |
298 | 46 | |
299 | 46 | // Skip ConstantPointerNull and UndefValue. Assumptions on these shouldn't |
300 | 46 | // affect other users. |
301 | 46 | if (isa<ConstantData>(AAPtr)) |
302 | 2 | return false; |
303 | 44 | |
304 | 44 | const SCEV *AASCEV = SE->getSCEV(AAPtr); |
305 | 44 | |
306 | 44 | // Apply the assumption to all other users of the specified pointer. |
307 | 44 | SmallPtrSet<Instruction *, 32> Visited; |
308 | 44 | SmallVector<Instruction*, 16> WorkList; |
309 | 88 | for (User *J : AAPtr->users()) { |
310 | 88 | if (J == ACall) |
311 | 0 | continue; |
312 | 88 | |
313 | 88 | if (Instruction *K = dyn_cast<Instruction>(J)) |
314 | 88 | if (isValidAssumeForContext(ACall, K, DT)) |
315 | 44 | WorkList.push_back(K); |
316 | 88 | } |
317 | 44 | |
318 | 204 | while (!WorkList.empty()) { |
319 | 160 | Instruction *J = WorkList.pop_back_val(); |
320 | 160 | |
321 | 160 | if (LoadInst *LI = dyn_cast<LoadInst>(J)) { |
322 | 32 | unsigned NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV, |
323 | 32 | LI->getPointerOperand(), SE); |
324 | 32 | |
325 | 32 | if (NewAlignment > LI->getAlignment()) { |
326 | 32 | LI->setAlignment(NewAlignment); |
327 | 32 | ++NumLoadAlignChanged; |
328 | 32 | } |
329 | 128 | } else if (StoreInst *SI = dyn_cast<StoreInst>(J)) { |
330 | 0 | unsigned NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV, |
331 | 0 | SI->getPointerOperand(), SE); |
332 | 0 | |
333 | 0 | if (NewAlignment > SI->getAlignment()) { |
334 | 0 | SI->setAlignment(NewAlignment); |
335 | 0 | ++NumStoreAlignChanged; |
336 | 0 | } |
337 | 128 | } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(J)) { |
338 | 12 | unsigned NewDestAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV, |
339 | 12 | MI->getDest(), SE); |
340 | 12 | |
341 | 12 | LLVM_DEBUG(dbgs() << "\tmem inst: " << NewDestAlignment << "\n";); |
342 | 12 | if (NewDestAlignment > MI->getDestAlignment()) { |
343 | 8 | MI->setDestAlignment(NewDestAlignment); |
344 | 8 | ++NumMemIntAlignChanged; |
345 | 8 | } |
346 | 12 | |
347 | 12 | // For memory transfers, there is also a source alignment that |
348 | 12 | // can be set. |
349 | 12 | if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { |
350 | 8 | unsigned NewSrcAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV, |
351 | 8 | MTI->getSource(), SE); |
352 | 8 | |
353 | 8 | LLVM_DEBUG(dbgs() << "\tmem trans: " << NewSrcAlignment << "\n";); |
354 | 8 | |
355 | 8 | if (NewSrcAlignment > MTI->getSourceAlignment()) { |
356 | 4 | MTI->setSourceAlignment(NewSrcAlignment); |
357 | 4 | ++NumMemIntAlignChanged; |
358 | 4 | } |
359 | 8 | } |
360 | 12 | } |
361 | 160 | |
362 | 160 | // Now that we've updated that use of the pointer, look for other uses of |
363 | 160 | // the pointer to update. |
364 | 160 | Visited.insert(J); |
365 | 160 | for (User *UJ : J->users()) { |
366 | 132 | Instruction *K = cast<Instruction>(UJ); |
368 | 132 | if (!Visited.count(K) && isValidAssumeForContext(ACall, K, DT)) |
368 | 116 | WorkList.push_back(K); |
369 | 132 | } |
370 | 160 | } |
371 | 44 | |
372 | 44 | return true; |
373 | 44 | } |
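// Note (illustrative): the worklist walk above is transitive, so an assumption
// on %p also reaches a load through a GEP such as
//   %q = getelementptr double, double* %p, i64 2
//   %v = load double, double* %q, align 8
// where getNewAlignment accounts for the 16-byte displacement of %q and can
// raise the load's alignment to 16 when %p is assumed 32-byte aligned.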
374 | | |
375 | 278k | bool AlignmentFromAssumptions::runOnFunction(Function &F) { |
376 | 278k | if (skipFunction(F)) |
377 | 44 | return false; |
378 | 278k | |
379 | 278k | auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); |
380 | 278k | ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); |
381 | 278k | DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); |
382 | 278k | |
383 | 278k | return Impl.runImpl(F, AC, SE, DT); |
384 | 278k | } |
385 | | |
386 | | bool AlignmentFromAssumptionsPass::runImpl(Function &F, AssumptionCache &AC, |
387 | | ScalarEvolution *SE_, |
388 | 279k | DominatorTree *DT_) { |
389 | 279k | SE = SE_; |
390 | 279k | DT = DT_; |
391 | 279k | |
392 | 279k | bool Changed = false; |
393 | 279k | for (auto &AssumeVH : AC.assumptions()) |
394 | 75 | if (AssumeVH) |
395 | 74 | Changed |= processAssumption(cast<CallInst>(AssumeVH)); |
396 | 279k | |
397 | 279k | return Changed; |
398 | 279k | } |
399 | | |
400 | | PreservedAnalyses |
401 | 881 | AlignmentFromAssumptionsPass::run(Function &F, FunctionAnalysisManager &AM) { |
402 | 881 | |
403 | 881 | AssumptionCache &AC = AM.getResult<AssumptionAnalysis>(F); |
404 | 881 | ScalarEvolution &SE = AM.getResult<ScalarEvolutionAnalysis>(F); |
405 | 881 | DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F); |
406 | 881 | if (!runImpl(F, AC, &SE, &DT)) |
407 | 861 | return PreservedAnalyses::all(); |
408 | 20 | |
409 | 20 | PreservedAnalyses PA; |
410 | 20 | PA.preserveSet<CFGAnalyses>(); |
411 | 20 | PA.preserve<AAManager>(); |
412 | 20 | PA.preserve<ScalarEvolutionAnalysis>(); |
413 | 20 | PA.preserve<GlobalsAA>(); |
414 | 20 | return PA; |
415 | 20 | } |
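// Usage sketch (assuming the new pass manager also registers this pass under
// the AA_NAME string used by the legacy registration above):
//
//   opt -passes=alignment-from-assumptions -S input.ll
//
// or, with the legacy pass manager:
//
//   opt -alignment-from-assumptions -S input.ll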