/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/lib/CodeGen/SafeStack.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | //===- SafeStack.cpp - Safe Stack Insertion -------------------------------===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | // |
9 | | // This pass splits the stack into the safe stack (kept as-is for LLVM backend) |
10 | | // and the unsafe stack (explicitly allocated and managed through the runtime |
11 | | // support library). |
12 | | // |
13 | | // http://clang.llvm.org/docs/SafeStack.html |
14 | | // |
15 | | //===----------------------------------------------------------------------===// |
16 | | |
17 | | #include "SafeStackColoring.h" |
18 | | #include "SafeStackLayout.h" |
19 | | #include "llvm/ADT/APInt.h" |
20 | | #include "llvm/ADT/ArrayRef.h" |
21 | | #include "llvm/ADT/SmallPtrSet.h" |
22 | | #include "llvm/ADT/SmallVector.h" |
23 | | #include "llvm/ADT/Statistic.h" |
24 | | #include "llvm/Analysis/AssumptionCache.h" |
25 | | #include "llvm/Analysis/BranchProbabilityInfo.h" |
26 | | #include "llvm/Analysis/InlineCost.h" |
27 | | #include "llvm/Analysis/LoopInfo.h" |
28 | | #include "llvm/Analysis/ScalarEvolution.h" |
29 | | #include "llvm/Analysis/ScalarEvolutionExpressions.h" |
30 | | #include "llvm/Analysis/TargetLibraryInfo.h" |
31 | | #include "llvm/Transforms/Utils/Local.h" |
32 | | #include "llvm/CodeGen/TargetLowering.h" |
33 | | #include "llvm/CodeGen/TargetPassConfig.h" |
34 | | #include "llvm/CodeGen/TargetSubtargetInfo.h" |
35 | | #include "llvm/IR/Argument.h" |
36 | | #include "llvm/IR/Attributes.h" |
37 | | #include "llvm/IR/CallSite.h" |
38 | | #include "llvm/IR/ConstantRange.h" |
39 | | #include "llvm/IR/Constants.h" |
40 | | #include "llvm/IR/DIBuilder.h" |
41 | | #include "llvm/IR/DataLayout.h" |
42 | | #include "llvm/IR/DerivedTypes.h" |
43 | | #include "llvm/IR/Dominators.h" |
44 | | #include "llvm/IR/Function.h" |
45 | | #include "llvm/IR/IRBuilder.h" |
46 | | #include "llvm/IR/InstIterator.h" |
47 | | #include "llvm/IR/Instruction.h" |
48 | | #include "llvm/IR/Instructions.h" |
49 | | #include "llvm/IR/IntrinsicInst.h" |
50 | | #include "llvm/IR/Intrinsics.h" |
51 | | #include "llvm/IR/MDBuilder.h" |
52 | | #include "llvm/IR/Module.h" |
53 | | #include "llvm/IR/Type.h" |
54 | | #include "llvm/IR/Use.h" |
55 | | #include "llvm/IR/User.h" |
56 | | #include "llvm/IR/Value.h" |
57 | | #include "llvm/Pass.h" |
58 | | #include "llvm/Support/Casting.h" |
59 | | #include "llvm/Support/Debug.h" |
60 | | #include "llvm/Support/ErrorHandling.h" |
61 | | #include "llvm/Support/MathExtras.h" |
62 | | #include "llvm/Support/raw_ostream.h" |
63 | | #include "llvm/Target/TargetMachine.h" |
64 | | #include "llvm/Transforms/Utils/BasicBlockUtils.h" |
65 | | #include "llvm/Transforms/Utils/Cloning.h" |
66 | | #include <algorithm> |
67 | | #include <cassert> |
68 | | #include <cstdint> |
69 | | #include <string> |
70 | | #include <utility> |
71 | | |
72 | | using namespace llvm; |
73 | | using namespace llvm::safestack; |
74 | | |
75 | | #define DEBUG_TYPE "safe-stack" |
76 | | |
namespace llvm {

// Pass-wide statistics, reported with -stats. The "unsafe" counters track how
// many stack objects had to be moved off the regular (safe) stack.
STATISTIC(NumFunctions, "Total number of functions");
STATISTIC(NumUnsafeStackFunctions, "Number of functions with unsafe stack");
STATISTIC(NumUnsafeStackRestorePointsFunctions,
          "Number of functions that use setjmp or exceptions");

STATISTIC(NumAllocas, "Total number of allocas");
STATISTIC(NumUnsafeStaticAllocas, "Number of unsafe static allocas");
STATISTIC(NumUnsafeDynamicAllocas, "Number of unsafe dynamic allocas");
STATISTIC(NumUnsafeByValArguments, "Number of unsafe byval arguments");
STATISTIC(NumUnsafeStackRestorePoints, "Number of setjmps and landingpads");

} // namespace llvm
91 | | |
/// Use __safestack_pointer_address even if the platform has a faster way of
/// accessing the safe stack pointer.
static cl::opt<bool>
    SafeStackUsePointerAddress("safestack-use-pointer-address",
                               cl::init(false), cl::Hidden);
97 | | |
98 | | |
99 | | namespace { |
100 | | |
101 | | /// Rewrite an SCEV expression for a memory access address to an expression that |
102 | | /// represents offset from the given alloca. |
103 | | /// |
104 | | /// The implementation simply replaces all mentions of the alloca with zero. |
105 | | class AllocaOffsetRewriter : public SCEVRewriteVisitor<AllocaOffsetRewriter> { |
106 | | const Value *AllocaPtr; |
107 | | |
108 | | public: |
109 | | AllocaOffsetRewriter(ScalarEvolution &SE, const Value *AllocaPtr) |
110 | 113 | : SCEVRewriteVisitor(SE), AllocaPtr(AllocaPtr) {} |
111 | | |
112 | 122 | const SCEV *visitUnknown(const SCEVUnknown *Expr) { |
113 | 122 | if (Expr->getValue() == AllocaPtr) |
114 | 111 | return SE.getZero(Expr->getType()); |
115 | 11 | return Expr; |
116 | 11 | } |
117 | | }; |
118 | | |
/// The SafeStack pass splits the stack of each function into the safe
/// stack, which is only accessed through memory safe dereferences (as
/// determined statically), and the unsafe stack, which contains all
/// local variables that are accessed in ways that we can't prove to
/// be safe.
class SafeStack {
  Function &F;
  const TargetLoweringBase &TL;
  const DataLayout &DL;
  ScalarEvolution &SE;

  // Commonly used IR types, cached once per function in the constructor.
  Type *StackPtrTy; // i8* — the type of the unsafe stack pointer.
  Type *IntPtrTy;   // Pointer-sized integer, for address arithmetic.
  Type *Int32Ty;
  Type *Int8Ty;

  // The current value of the unsafe stack pointer; set up in run() before the
  // move* helpers are invoked.
  Value *UnsafeStackPtr = nullptr;

  /// Unsafe stack alignment. Each stack frame must ensure that the stack is
  /// aligned to this value. We need to re-align the unsafe stack if the
  /// alignment of any object on the stack exceeds this value.
  ///
  /// 16 seems like a reasonable upper bound on the alignment of objects that we
  /// might expect to appear on the stack on most common targets.
  enum { StackAlignment = 16 };

  /// Return the value of the stack canary.
  Value *getStackGuard(IRBuilder<> &IRB, Function &F);

  /// Load stack guard from the frame and check if it has changed.
  void checkStackGuard(IRBuilder<> &IRB, Function &F, ReturnInst &RI,
                       AllocaInst *StackGuardSlot, Value *StackGuard);

  /// Find all static allocas, dynamic allocas, return instructions and
  /// stack restore points (exception unwind blocks and setjmp calls) in the
  /// given function and append them to the respective vectors.
  void findInsts(Function &F, SmallVectorImpl<AllocaInst *> &StaticAllocas,
                 SmallVectorImpl<AllocaInst *> &DynamicAllocas,
                 SmallVectorImpl<Argument *> &ByValArguments,
                 SmallVectorImpl<ReturnInst *> &Returns,
                 SmallVectorImpl<Instruction *> &StackRestorePoints);

  /// Calculate the allocation size of a given alloca. Returns 0 if the
  /// size can not be statically determined.
  uint64_t getStaticAllocaAllocationSize(const AllocaInst* AI);

  /// Allocate space for all static allocas in \p StaticAllocas,
  /// replace allocas with pointers into the unsafe stack and generate code to
  /// restore the stack pointer before all return instructions in \p Returns.
  ///
  /// \returns A pointer to the top of the unsafe stack after all unsafe static
  /// allocas are allocated.
  Value *moveStaticAllocasToUnsafeStack(IRBuilder<> &IRB, Function &F,
                                        ArrayRef<AllocaInst *> StaticAllocas,
                                        ArrayRef<Argument *> ByValArguments,
                                        ArrayRef<ReturnInst *> Returns,
                                        Instruction *BasePointer,
                                        AllocaInst *StackGuardSlot);

  /// Generate code to restore the stack after all stack restore points
  /// in \p StackRestorePoints.
  ///
  /// \returns A local variable in which to maintain the dynamic top of the
  /// unsafe stack if needed.
  AllocaInst *
  createStackRestorePoints(IRBuilder<> &IRB, Function &F,
                           ArrayRef<Instruction *> StackRestorePoints,
                           Value *StaticTop, bool NeedDynamicTop);

  /// Replace all allocas in \p DynamicAllocas with code to allocate
  /// space dynamically on the unsafe stack and store the dynamic unsafe stack
  /// top to \p DynamicTop if non-null.
  void moveDynamicAllocasToUnsafeStack(Function &F, Value *UnsafeStackPtr,
                                       AllocaInst *DynamicTop,
                                       ArrayRef<AllocaInst *> DynamicAllocas);

  /// Return true if every use of the allocation is provably in-bounds.
  bool IsSafeStackAlloca(const Value *AllocaPtr, uint64_t AllocaSize);

  /// Check a mem intrinsic (memcpy/memmove/memset) touching the alloca via
  /// use \p U against the alloca's bounds.
  bool IsMemIntrinsicSafe(const MemIntrinsic *MI, const Use &U,
                          const Value *AllocaPtr, uint64_t AllocaSize);
  /// Check a single load/store of \p Size bytes at \p Addr against the
  /// alloca's bounds using SCEV range analysis.
  bool IsAccessSafe(Value *Addr, uint64_t Size, const Value *AllocaPtr,
                    uint64_t AllocaSize);

  bool ShouldInlinePointerAddress(CallSite &CS);
  void TryInlinePointerAddress();

public:
  SafeStack(Function &F, const TargetLoweringBase &TL, const DataLayout &DL,
            ScalarEvolution &SE)
      : F(F), TL(TL), DL(DL), SE(SE),
        StackPtrTy(Type::getInt8PtrTy(F.getContext())),
        IntPtrTy(DL.getIntPtrType(F.getContext())),
        Int32Ty(Type::getInt32Ty(F.getContext())),
        Int8Ty(Type::getInt8Ty(F.getContext())) {}

  // Run the transformation on the associated function.
  // Returns whether the function was changed.
  bool run();
};
218 | | |
219 | 550 | uint64_t SafeStack::getStaticAllocaAllocationSize(const AllocaInst* AI) { |
220 | 550 | uint64_t Size = DL.getTypeAllocSize(AI->getAllocatedType()); |
221 | 550 | if (AI->isArrayAllocation()) { |
222 | 31 | auto C = dyn_cast<ConstantInt>(AI->getArraySize()); |
223 | 31 | if (!C) |
224 | 8 | return 0; |
225 | 23 | Size *= C->getZExtValue(); |
226 | 23 | } |
227 | 550 | return Size542 ; |
228 | 550 | } |
229 | | |
230 | | bool SafeStack::IsAccessSafe(Value *Addr, uint64_t AccessSize, |
231 | 113 | const Value *AllocaPtr, uint64_t AllocaSize) { |
232 | 113 | AllocaOffsetRewriter Rewriter(SE, AllocaPtr); |
233 | 113 | const SCEV *Expr = Rewriter.visit(SE.getSCEV(Addr)); |
234 | 113 | |
235 | 113 | uint64_t BitWidth = SE.getTypeSizeInBits(Expr->getType()); |
236 | 113 | ConstantRange AccessStartRange = SE.getUnsignedRange(Expr); |
237 | 113 | ConstantRange SizeRange = |
238 | 113 | ConstantRange(APInt(BitWidth, 0), APInt(BitWidth, AccessSize)); |
239 | 113 | ConstantRange AccessRange = AccessStartRange.add(SizeRange); |
240 | 113 | ConstantRange AllocaRange = |
241 | 113 | ConstantRange(APInt(BitWidth, 0), APInt(BitWidth, AllocaSize)); |
242 | 113 | bool Safe = AllocaRange.contains(AccessRange); |
243 | 113 | |
244 | 113 | LLVM_DEBUG( |
245 | 113 | dbgs() << "[SafeStack] " |
246 | 113 | << (isa<AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ") |
247 | 113 | << *AllocaPtr << "\n" |
248 | 113 | << " Access " << *Addr << "\n" |
249 | 113 | << " SCEV " << *Expr |
250 | 113 | << " U: " << SE.getUnsignedRange(Expr) |
251 | 113 | << ", S: " << SE.getSignedRange(Expr) << "\n" |
252 | 113 | << " Range " << AccessRange << "\n" |
253 | 113 | << " AllocaRange " << AllocaRange << "\n" |
254 | 113 | << " " << (Safe ? "safe" : "unsafe") << "\n"); |
255 | 113 | |
256 | 113 | return Safe; |
257 | 113 | } |
258 | | |
259 | | bool SafeStack::IsMemIntrinsicSafe(const MemIntrinsic *MI, const Use &U, |
260 | | const Value *AllocaPtr, |
261 | 11 | uint64_t AllocaSize) { |
262 | 11 | if (auto MTI = dyn_cast<MemTransferInst>(MI)) { |
263 | 3 | if (MTI->getRawSource() != U && MTI->getRawDest() != U0 ) |
264 | 0 | return true; |
265 | 8 | } else { |
266 | 8 | if (MI->getRawDest() != U) |
267 | 0 | return true; |
268 | 11 | } |
269 | 11 | |
270 | 11 | const auto *Len = dyn_cast<ConstantInt>(MI->getLength()); |
271 | 11 | // Non-constant size => unsafe. FIXME: try SCEV getRange. |
272 | 11 | if (!Len) return false2 ; |
273 | 9 | return IsAccessSafe(U, Len->getZExtValue(), AllocaPtr, AllocaSize); |
274 | 9 | } |
275 | | |
/// Check whether a given allocation must be put on the safe
/// stack or not. The function analyzes all uses of AI and checks whether it is
/// only accessed in a memory safe way (as decided statically).
bool SafeStack::IsSafeStackAlloca(const Value *AllocaPtr, uint64_t AllocaSize) {
  // Go through all uses of this alloca and check whether all accesses to the
  // allocated object are statically known to be memory safe and, hence, the
  // object can be placed on the safe stack.
  SmallPtrSet<const Value *, 16> Visited;
  SmallVector<const Value *, 8> WorkList;
  WorkList.push_back(AllocaPtr);

  // A DFS search through all uses of the alloca in bitcasts/PHI/GEPs/etc.
  while (!WorkList.empty()) {
    const Value *V = WorkList.pop_back_val();
    for (const Use &UI : V->uses()) {
      auto I = cast<const Instruction>(UI.getUser());
      assert(V == UI.get());

      switch (I->getOpcode()) {
      case Instruction::Load:
        // A load is safe iff the loaded range is within the alloca.
        if (!IsAccessSafe(UI, DL.getTypeStoreSize(I->getType()), AllocaPtr,
                          AllocaSize))
          return false;
        break;

      case Instruction::VAArg:
        // "va-arg" from a pointer is safe.
        break;
      case Instruction::Store:
        if (V == I->getOperand(0)) {
          // Stored the pointer - conservatively assume it may be unsafe.
          LLVM_DEBUG(dbgs()
                     << "[SafeStack] Unsafe alloca: " << *AllocaPtr
                     << "\n            store of address: " << *I << "\n");
          return false;
        }

        // Storing *to* the pointer: check the written range.
        if (!IsAccessSafe(UI, DL.getTypeStoreSize(I->getOperand(0)->getType()),
                          AllocaPtr, AllocaSize))
          return false;
        break;

      case Instruction::Ret:
        // Information leak.
        return false;

      case Instruction::Call:
      case Instruction::Invoke: {
        ImmutableCallSite CS(I);

        // Lifetime markers don't constitute a real access.
        if (I->isLifetimeStartOrEnd())
          continue;

        if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
          if (!IsMemIntrinsicSafe(MI, UI, AllocaPtr, AllocaSize)) {
            LLVM_DEBUG(dbgs()
                       << "[SafeStack] Unsafe alloca: " << *AllocaPtr
                       << "\n            unsafe memintrinsic: " << *I << "\n");
            return false;
          }
          continue;
        }

        // LLVM 'nocapture' attribute is only set for arguments whose address
        // is not stored, passed around, or used in any other non-trivial way.
        // We assume that passing a pointer to an object as a 'nocapture
        // readnone' argument is safe.
        // FIXME: a more precise solution would require an interprocedural
        // analysis here, which would look at all uses of an argument inside
        // the function being called.
        ImmutableCallSite::arg_iterator B = CS.arg_begin(), E = CS.arg_end();
        for (ImmutableCallSite::arg_iterator A = B; A != E; ++A)
          if (A->get() == V)
            if (!(CS.doesNotCapture(A - B) && (CS.doesNotAccessMemory(A - B) ||
                                               CS.doesNotAccessMemory()))) {
              LLVM_DEBUG(dbgs() << "[SafeStack] Unsafe alloca: " << *AllocaPtr
                                << "\n            unsafe call: " << *I << "\n");
              return false;
            }
        continue;
      }

      default:
        // Bitcasts, GEPs, PHIs, selects, etc.: follow the derived pointer.
        if (Visited.insert(I).second)
          WorkList.push_back(cast<const Instruction>(I));
      }
    }
  }

  // All uses of the alloca are safe, we can place it on the safe stack.
  return true;
}
368 | | |
369 | 13 | Value *SafeStack::getStackGuard(IRBuilder<> &IRB, Function &F) { |
370 | 13 | Value *StackGuardVar = TL.getIRStackGuard(IRB); |
371 | 13 | if (!StackGuardVar) |
372 | 2 | StackGuardVar = |
373 | 2 | F.getParent()->getOrInsertGlobal("__stack_chk_guard", StackPtrTy); |
374 | 13 | return IRB.CreateLoad(StackPtrTy, StackGuardVar, "StackGuard"); |
375 | 13 | } |
376 | | |
377 | | void SafeStack::findInsts(Function &F, |
378 | | SmallVectorImpl<AllocaInst *> &StaticAllocas, |
379 | | SmallVectorImpl<AllocaInst *> &DynamicAllocas, |
380 | | SmallVectorImpl<Argument *> &ByValArguments, |
381 | | SmallVectorImpl<ReturnInst *> &Returns, |
382 | 190 | SmallVectorImpl<Instruction *> &StackRestorePoints) { |
383 | 1.41k | for (Instruction &I : instructions(&F)) { |
384 | 1.41k | if (auto AI = dyn_cast<AllocaInst>(&I)) { |
385 | 305 | ++NumAllocas; |
386 | 305 | |
387 | 305 | uint64_t Size = getStaticAllocaAllocationSize(AI); |
388 | 305 | if (IsSafeStackAlloca(AI, Size)) |
389 | 64 | continue; |
390 | 241 | |
391 | 241 | if (AI->isStaticAlloca()) { |
392 | 232 | ++NumUnsafeStaticAllocas; |
393 | 232 | StaticAllocas.push_back(AI); |
394 | 232 | } else { |
395 | 9 | ++NumUnsafeDynamicAllocas; |
396 | 9 | DynamicAllocas.push_back(AI); |
397 | 9 | } |
398 | 1.11k | } else if (auto RI = dyn_cast<ReturnInst>(&I)) { |
399 | 200 | Returns.push_back(RI); |
400 | 914 | } else if (auto CI = dyn_cast<CallInst>(&I)) { |
401 | 442 | // setjmps require stack restore. |
402 | 442 | if (CI->getCalledFunction() && CI->canReturnTwice()) |
403 | 5 | StackRestorePoints.push_back(CI); |
404 | 472 | } else if (auto LP = dyn_cast<LandingPadInst>(&I)) { |
405 | 4 | // Exception landing pads require stack restore. |
406 | 4 | StackRestorePoints.push_back(LP); |
407 | 468 | } else if (auto II = dyn_cast<IntrinsicInst>(&I)) { |
408 | 0 | if (II->getIntrinsicID() == Intrinsic::gcroot) |
409 | 0 | report_fatal_error( |
410 | 0 | "gcroot intrinsic not compatible with safestack attribute"); |
411 | 0 | } |
412 | 1.41k | } |
413 | 190 | for (Argument &Arg : F.args()) { |
414 | 80 | if (!Arg.hasByValAttr()) |
415 | 70 | continue; |
416 | 10 | uint64_t Size = |
417 | 10 | DL.getTypeStoreSize(Arg.getType()->getPointerElementType()); |
418 | 10 | if (IsSafeStackAlloca(&Arg, Size)) |
419 | 3 | continue; |
420 | 7 | |
421 | 7 | ++NumUnsafeByValArguments; |
422 | 7 | ByValArguments.push_back(&Arg); |
423 | 7 | } |
424 | 190 | } |
425 | | |
426 | | AllocaInst * |
427 | | SafeStack::createStackRestorePoints(IRBuilder<> &IRB, Function &F, |
428 | | ArrayRef<Instruction *> StackRestorePoints, |
429 | 161 | Value *StaticTop, bool NeedDynamicTop) { |
430 | 161 | assert(StaticTop && "The stack top isn't set."); |
431 | 161 | |
432 | 161 | if (StackRestorePoints.empty()) |
433 | 152 | return nullptr; |
434 | 9 | |
435 | 9 | // We need the current value of the shadow stack pointer to restore |
436 | 9 | // after longjmp or exception catching. |
437 | 9 | |
438 | 9 | // FIXME: On some platforms this could be handled by the longjmp/exception |
439 | 9 | // runtime itself. |
440 | 9 | |
441 | 9 | AllocaInst *DynamicTop = nullptr; |
442 | 9 | if (NeedDynamicTop) { |
443 | 3 | // If we also have dynamic alloca's, the stack pointer value changes |
444 | 3 | // throughout the function. For now we store it in an alloca. |
445 | 3 | DynamicTop = IRB.CreateAlloca(StackPtrTy, /*ArraySize=*/nullptr, |
446 | 3 | "unsafe_stack_dynamic_ptr"); |
447 | 3 | IRB.CreateStore(StaticTop, DynamicTop); |
448 | 3 | } |
449 | 9 | |
450 | 9 | // Restore current stack pointer after longjmp/exception catch. |
451 | 9 | for (Instruction *I : StackRestorePoints) { |
452 | 9 | ++NumUnsafeStackRestorePoints; |
453 | 9 | |
454 | 9 | IRB.SetInsertPoint(I->getNextNode()); |
455 | 9 | Value *CurrentTop = |
456 | 9 | DynamicTop ? IRB.CreateLoad(StackPtrTy, DynamicTop)3 : StaticTop6 ; |
457 | 9 | IRB.CreateStore(CurrentTop, UnsafeStackPtr); |
458 | 9 | } |
459 | 9 | |
460 | 9 | return DynamicTop; |
461 | 9 | } |
462 | | |
463 | | void SafeStack::checkStackGuard(IRBuilder<> &IRB, Function &F, ReturnInst &RI, |
464 | 13 | AllocaInst *StackGuardSlot, Value *StackGuard) { |
465 | 13 | Value *V = IRB.CreateLoad(StackPtrTy, StackGuardSlot); |
466 | 13 | Value *Cmp = IRB.CreateICmpNE(StackGuard, V); |
467 | 13 | |
468 | 13 | auto SuccessProb = BranchProbabilityInfo::getBranchProbStackProtector(true); |
469 | 13 | auto FailureProb = BranchProbabilityInfo::getBranchProbStackProtector(false); |
470 | 13 | MDNode *Weights = MDBuilder(F.getContext()) |
471 | 13 | .createBranchWeights(SuccessProb.getNumerator(), |
472 | 13 | FailureProb.getNumerator()); |
473 | 13 | Instruction *CheckTerm = |
474 | 13 | SplitBlockAndInsertIfThen(Cmp, &RI, |
475 | 13 | /* Unreachable */ true, Weights); |
476 | 13 | IRBuilder<> IRBFail(CheckTerm); |
477 | 13 | // FIXME: respect -fsanitize-trap / -ftrap-function here? |
478 | 13 | FunctionCallee StackChkFail = |
479 | 13 | F.getParent()->getOrInsertFunction("__stack_chk_fail", IRB.getVoidTy()); |
480 | 13 | IRBFail.CreateCall(StackChkFail, {}); |
481 | 13 | } |
482 | | |
/// We explicitly compute and set the unsafe stack layout for all unsafe
/// static alloca instructions. We save the unsafe "base pointer" in the
/// prologue into a local variable and restore it in the epilogue.
Value *SafeStack::moveStaticAllocasToUnsafeStack(
    IRBuilder<> &IRB, Function &F, ArrayRef<AllocaInst *> StaticAllocas,
    ArrayRef<Argument *> ByValArguments, ArrayRef<ReturnInst *> Returns,
    Instruction *BasePointer, AllocaInst *StackGuardSlot) {
  // Nothing to move: the frame top stays at the base pointer.
  if (StaticAllocas.empty() && ByValArguments.empty())
    return BasePointer;

  DIBuilder DIB(*F.getParent());

  // Compute lifetimes so non-overlapping objects can share frame slots.
  StackColoring SSC(F, StaticAllocas);
  SSC.run();
  SSC.removeAllMarkers();

  // Unsafe stack always grows down.
  StackLayout SSL(StackAlignment);
  if (StackGuardSlot) {
    // The canary slot must live for the whole frame.
    Type *Ty = StackGuardSlot->getAllocatedType();
    unsigned Align =
        std::max(DL.getPrefTypeAlignment(Ty), StackGuardSlot->getAlignment());
    SSL.addObject(StackGuardSlot, getStaticAllocaAllocationSize(StackGuardSlot),
                  Align, SSC.getFullLiveRange());
  }

  for (Argument *Arg : ByValArguments) {
    Type *Ty = Arg->getType()->getPointerElementType();
    uint64_t Size = DL.getTypeStoreSize(Ty);
    if (Size == 0)
      Size = 1; // Don't create zero-sized stack objects.

    // Ensure the object is properly aligned.
    unsigned Align = std::max((unsigned)DL.getPrefTypeAlignment(Ty),
                              Arg->getParamAlignment());
    SSL.addObject(Arg, Size, Align, SSC.getFullLiveRange());
  }

  for (AllocaInst *AI : StaticAllocas) {
    Type *Ty = AI->getAllocatedType();
    uint64_t Size = getStaticAllocaAllocationSize(AI);
    if (Size == 0)
      Size = 1; // Don't create zero-sized stack objects.

    // Ensure the object is properly aligned.
    unsigned Align =
        std::max((unsigned)DL.getPrefTypeAlignment(Ty), AI->getAlignment());

    SSL.addObject(AI, Size, Align, SSC.getLiveRange(AI));
  }

  SSL.computeLayout();
  unsigned FrameAlignment = SSL.getFrameAlignment();

  // FIXME: tell SSL that we start at a less-then-MaxAlignment aligned location
  // (AlignmentSkew).
  if (FrameAlignment > StackAlignment) {
    // Re-align the base pointer according to the max requested alignment.
    assert(isPowerOf2_32(FrameAlignment));
    IRB.SetInsertPoint(BasePointer->getNextNode());
    BasePointer = cast<Instruction>(IRB.CreateIntToPtr(
        IRB.CreateAnd(IRB.CreatePtrToInt(BasePointer, IntPtrTy),
                      ConstantInt::get(IntPtrTy, ~uint64_t(FrameAlignment - 1))),
        StackPtrTy));
  }

  IRB.SetInsertPoint(BasePointer->getNextNode());

  if (StackGuardSlot) {
    // Offsets are negative: the unsafe stack grows down from BasePointer.
    unsigned Offset = SSL.getObjectOffset(StackGuardSlot);
    Value *Off = IRB.CreateGEP(Int8Ty, BasePointer, // BasePointer is i8*
                               ConstantInt::get(Int32Ty, -Offset));
    Value *NewAI =
        IRB.CreateBitCast(Off, StackGuardSlot->getType(), "StackGuardSlot");

    // Replace alloc with the new location.
    StackGuardSlot->replaceAllUsesWith(NewAI);
    StackGuardSlot->eraseFromParent();
  }

  for (Argument *Arg : ByValArguments) {
    unsigned Offset = SSL.getObjectOffset(Arg);
    unsigned Align = SSL.getObjectAlignment(Arg);
    Type *Ty = Arg->getType()->getPointerElementType();

    uint64_t Size = DL.getTypeStoreSize(Ty);
    if (Size == 0)
      Size = 1; // Don't create zero-sized stack objects.

    Value *Off = IRB.CreateGEP(Int8Ty, BasePointer, // BasePointer is i8*
                               ConstantInt::get(Int32Ty, -Offset));
    Value *NewArg = IRB.CreateBitCast(Off, Arg->getType(),
                                      Arg->getName() + ".unsafe-byval");

    // Replace alloc with the new location.
    replaceDbgDeclare(Arg, BasePointer, BasePointer->getNextNode(), DIB,
                      DIExpression::ApplyOffset, -Offset);
    Arg->replaceAllUsesWith(NewArg);
    IRB.SetInsertPoint(cast<Instruction>(NewArg)->getNextNode());
    // Copy the caller-provided bytes into the new unsafe-stack home.
    IRB.CreateMemCpy(Off, Align, Arg, Arg->getParamAlignment(), Size);
  }

  // Allocate space for every unsafe static AllocaInst on the unsafe stack.
  for (AllocaInst *AI : StaticAllocas) {
    IRB.SetInsertPoint(AI);
    unsigned Offset = SSL.getObjectOffset(AI);

    replaceDbgDeclareForAlloca(AI, BasePointer, DIB, DIExpression::ApplyOffset,
                               -Offset);
    replaceDbgValueForAlloca(AI, BasePointer, DIB, -Offset);

    // Replace uses of the alloca with the new location.
    // Insert address calculation close to each use to work around PR27844.
    std::string Name = std::string(AI->getName()) + ".unsafe";
    while (!AI->use_empty()) {
      Use &U = *AI->use_begin();
      Instruction *User = cast<Instruction>(U.getUser());

      Instruction *InsertBefore;
      if (auto *PHI = dyn_cast<PHINode>(User))
        InsertBefore = PHI->getIncomingBlock(U)->getTerminator();
      else
        InsertBefore = User;

      IRBuilder<> IRBUser(InsertBefore);
      Value *Off = IRBUser.CreateGEP(Int8Ty, BasePointer, // BasePointer is i8*
                                     ConstantInt::get(Int32Ty, -Offset));
      Value *Replacement = IRBUser.CreateBitCast(Off, AI->getType(), Name);

      if (auto *PHI = dyn_cast<PHINode>(User))
        // PHI nodes may have multiple incoming edges from the same BB (why??),
        // all must be updated at once with the same incoming value.
        PHI->setIncomingValueForBlock(PHI->getIncomingBlock(U), Replacement);
      else
        U.set(Replacement);
    }

    AI->eraseFromParent();
  }

  // Re-align BasePointer so that our callees would see it aligned as
  // expected.
  // FIXME: no need to update BasePointer in leaf functions.
  unsigned FrameSize = alignTo(SSL.getFrameSize(), StackAlignment);

  // Update shadow stack pointer in the function epilogue.
  IRB.SetInsertPoint(BasePointer->getNextNode());

  Value *StaticTop =
      IRB.CreateGEP(Int8Ty, BasePointer, ConstantInt::get(Int32Ty, -FrameSize),
                    "unsafe_stack_static_top");
  IRB.CreateStore(StaticTop, UnsafeStackPtr);
  return StaticTop;
}
637 | | |
// Lower each dynamic (VLA-style) alloca into explicit pointer arithmetic on
// the unsafe stack pointer, and rewrite stacksave/stackrestore to operate on
// the unsafe stack pointer instead of the native one.
void SafeStack::moveDynamicAllocasToUnsafeStack(
    Function &F, Value *UnsafeStackPtr, AllocaInst *DynamicTop,
    ArrayRef<AllocaInst *> DynamicAllocas) {
  DIBuilder DIB(*F.getParent());

  for (AllocaInst *AI : DynamicAllocas) {
    IRBuilder<> IRB(AI);

    // Compute the new SP value (after AI).
    Value *ArraySize = AI->getArraySize();
    if (ArraySize->getType() != IntPtrTy)
      ArraySize = IRB.CreateIntCast(ArraySize, IntPtrTy, false);

    Type *Ty = AI->getAllocatedType();
    uint64_t TySize = DL.getTypeAllocSize(Ty);
    Value *Size = IRB.CreateMul(ArraySize, ConstantInt::get(IntPtrTy, TySize));

    // The unsafe stack grows down: subtract the allocation size.
    Value *SP = IRB.CreatePtrToInt(IRB.CreateLoad(StackPtrTy, UnsafeStackPtr),
                                   IntPtrTy);
    SP = IRB.CreateSub(SP, Size);

    // Align the SP value to satisfy the AllocaInst, type and stack alignments.
    unsigned Align = std::max(
        std::max((unsigned)DL.getPrefTypeAlignment(Ty), AI->getAlignment()),
        (unsigned)StackAlignment);

    assert(isPowerOf2_32(Align));
    Value *NewTop = IRB.CreateIntToPtr(
        IRB.CreateAnd(SP, ConstantInt::get(IntPtrTy, ~uint64_t(Align - 1))),
        StackPtrTy);

    // Save the stack pointer.
    IRB.CreateStore(NewTop, UnsafeStackPtr);
    if (DynamicTop)
      IRB.CreateStore(NewTop, DynamicTop);

    Value *NewAI = IRB.CreatePointerCast(NewTop, AI->getType());
    if (AI->hasName() && isa<Instruction>(NewAI))
      NewAI->takeName(AI);

    replaceDbgDeclareForAlloca(AI, NewAI, DIB, DIExpression::ApplyOffset, 0);
    AI->replaceAllUsesWith(NewAI);
    AI->eraseFromParent();
  }

  if (!DynamicAllocas.empty()) {
    // Now go through the instructions again, replacing stacksave/stackrestore.
    for (inst_iterator It = inst_begin(&F), Ie = inst_end(&F); It != Ie;) {
      // Advance before any potential erase of *It invalidates the iterator.
      Instruction *I = &*(It++);
      auto II = dyn_cast<IntrinsicInst>(I);
      if (!II)
        continue;

      if (II->getIntrinsicID() == Intrinsic::stacksave) {
        // stacksave => load of the current unsafe stack pointer.
        IRBuilder<> IRB(II);
        Instruction *LI = IRB.CreateLoad(StackPtrTy, UnsafeStackPtr);
        LI->takeName(II);
        II->replaceAllUsesWith(LI);
        II->eraseFromParent();
      } else if (II->getIntrinsicID() == Intrinsic::stackrestore) {
        // stackrestore => store back into the unsafe stack pointer.
        IRBuilder<> IRB(II);
        Instruction *SI = IRB.CreateStore(II->getArgOperand(0), UnsafeStackPtr);
        SI->takeName(II);
        assert(II->use_empty());
        II->eraseFromParent();
      }
    }
  }
}
707 | | |
708 | 3 | bool SafeStack::ShouldInlinePointerAddress(CallSite &CS) { |
709 | 3 | Function *Callee = CS.getCalledFunction(); |
710 | 3 | if (CS.hasFnAttr(Attribute::AlwaysInline) && isInlineViable(*Callee)0 ) |
711 | 0 | return true; |
712 | 3 | if (Callee->isInterposable() || Callee->hasFnAttribute(Attribute::NoInline) || |
713 | 3 | CS.isNoInline()2 ) |
714 | 1 | return false; |
715 | 2 | return true; |
716 | 2 | } |
717 | | |
718 | 161 | void SafeStack::TryInlinePointerAddress() { |
719 | 161 | if (!isa<CallInst>(UnsafeStackPtr)) |
720 | 155 | return; |
721 | 6 | |
722 | 6 | if(F.hasOptNone()) |
723 | 0 | return; |
724 | 6 | |
725 | 6 | CallSite CS(UnsafeStackPtr); |
726 | 6 | Function *Callee = CS.getCalledFunction(); |
727 | 6 | if (!Callee || Callee->isDeclaration()) |
728 | 3 | return; |
729 | 3 | |
730 | 3 | if (!ShouldInlinePointerAddress(CS)) |
731 | 1 | return; |
732 | 2 | |
733 | 2 | InlineFunctionInfo IFI; |
734 | 2 | InlineFunction(CS, IFI); |
735 | 2 | } |
736 | | |
/// Run the SafeStack transformation on F: move unsafe allocations to the
/// separately-managed unsafe stack and restore its pointer where needed.
/// Returns true iff the function was modified.
bool SafeStack::run() {
  assert(F.hasFnAttribute(Attribute::SafeStack) &&
         "Can't run SafeStack on a function without the attribute");
  assert(!F.isDeclaration() && "Can't run SafeStack on a function declaration");

  ++NumFunctions;

  // Buckets for everything findInsts() discovers in F.
  SmallVector<AllocaInst *, 16> StaticAllocas;
  SmallVector<AllocaInst *, 4> DynamicAllocas;
  SmallVector<Argument *, 4> ByValArguments;
  SmallVector<ReturnInst *, 4> Returns;

  // Collect all points where stack gets unwound and needs to be restored.
  // This is only necessary because the runtime (setjmp and unwind code) is
  // not aware of the unsafe stack and won't unwind/restore it properly.
  // To work around this problem without changing the runtime, we insert
  // instrumentation to restore the unsafe stack pointer when necessary.
  SmallVector<Instruction *, 4> StackRestorePoints;

  // Find all static and dynamic alloca instructions that must be moved to the
  // unsafe stack, all return instructions and stack restore points.
  findInsts(F, StaticAllocas, DynamicAllocas, ByValArguments, Returns,
            StackRestorePoints);

  if (StaticAllocas.empty() && DynamicAllocas.empty() &&
      ByValArguments.empty() && StackRestorePoints.empty())
    return false; // Nothing to do in this function.

  if (!StaticAllocas.empty() || !DynamicAllocas.empty() ||
      !ByValArguments.empty())
    ++NumUnsafeStackFunctions; // This function has the unsafe stack.

  if (!StackRestorePoints.empty())
    ++NumUnsafeStackRestorePointsFunctions;

  // Build all new instrumentation at the entry block's insertion point.
  IRBuilder<> IRB(&F.front(), F.begin()->getFirstInsertionPt());
  // Calls must always have a debug location, or else inlining breaks. So
  // we explicitly set an artificial debug location here.
  if (DISubprogram *SP = F.getSubprogram())
    IRB.SetCurrentDebugLocation(DebugLoc::get(SP->getScopeLine(), 0, SP));
  if (SafeStackUsePointerAddress) {
    // Ask the runtime for the address of the unsafe stack pointer variable
    // instead of computing it through TargetLowering.
    FunctionCallee Fn = F.getParent()->getOrInsertFunction(
        "__safestack_pointer_address", StackPtrTy->getPointerTo(0));
    UnsafeStackPtr = IRB.CreateCall(Fn);
  } else {
    UnsafeStackPtr = TL.getSafeStackPointerLocation(IRB);
  }

  // Load the current stack pointer (we'll also use it as a base pointer).
  // FIXME: use a dedicated register for it ?
  Instruction *BasePointer =
      IRB.CreateLoad(StackPtrTy, UnsafeStackPtr, false, "unsafe_stack_ptr");
  assert(BasePointer->getType() == StackPtrTy);

  // If any form of stack protection is requested, spill the stack guard into
  // a slot and verify it before every return.
  AllocaInst *StackGuardSlot = nullptr;
  // FIXME: implement weaker forms of stack protector.
  if (F.hasFnAttribute(Attribute::StackProtect) ||
      F.hasFnAttribute(Attribute::StackProtectStrong) ||
      F.hasFnAttribute(Attribute::StackProtectReq)) {
    Value *StackGuard = getStackGuard(IRB, F);
    StackGuardSlot = IRB.CreateAlloca(StackPtrTy, nullptr);
    IRB.CreateStore(StackGuard, StackGuardSlot);

    for (ReturnInst *RI : Returns) {
      IRBuilder<> IRBRet(RI);
      checkStackGuard(IRBRet, F, *RI, StackGuardSlot, StackGuard);
    }
  }

  // The top of the unsafe stack after all unsafe static allocas are
  // allocated.
  Value *StaticTop =
      moveStaticAllocasToUnsafeStack(IRB, F, StaticAllocas, ByValArguments,
                                     Returns, BasePointer, StackGuardSlot);

  // Safe stack object that stores the current unsafe stack top. It is updated
  // as unsafe dynamic (non-constant-sized) allocas are allocated and freed.
  // This is only needed if we need to restore stack pointer after longjmp
  // or exceptions, and we have dynamic allocations.
  // FIXME: a better alternative might be to store the unsafe stack pointer
  // before setjmp / invoke instructions.
  AllocaInst *DynamicTop = createStackRestorePoints(
      IRB, F, StackRestorePoints, StaticTop, !DynamicAllocas.empty());

  // Handle dynamic allocas.
  moveDynamicAllocasToUnsafeStack(F, UnsafeStackPtr, DynamicTop,
                                  DynamicAllocas);

  // Restore the unsafe stack pointer before each return, popping the whole
  // unsafe frame in one store.
  for (ReturnInst *RI : Returns) {
    IRB.SetInsertPoint(RI);
    IRB.CreateStore(BasePointer, UnsafeStackPtr);
  }

  TryInlinePointerAddress();

  LLVM_DEBUG(dbgs() << "[SafeStack] safestack applied\n");
  return true;
}
836 | | |
837 | | class SafeStackLegacyPass : public FunctionPass { |
838 | | const TargetMachine *TM = nullptr; |
839 | | |
840 | | public: |
841 | | static char ID; // Pass identification, replacement for typeid.. |
842 | | |
843 | 36.4k | SafeStackLegacyPass() : FunctionPass(ID) { |
844 | 36.4k | initializeSafeStackLegacyPassPass(*PassRegistry::getPassRegistry()); |
845 | 36.4k | } |
846 | | |
847 | 36.2k | void getAnalysisUsage(AnalysisUsage &AU) const override { |
848 | 36.2k | AU.addRequired<TargetPassConfig>(); |
849 | 36.2k | AU.addRequired<TargetLibraryInfoWrapperPass>(); |
850 | 36.2k | AU.addRequired<AssumptionCacheTracker>(); |
851 | 36.2k | } |
852 | | |
853 | 499k | bool runOnFunction(Function &F) override { |
854 | 499k | LLVM_DEBUG(dbgs() << "[SafeStack] Function: " << F.getName() << "\n"); |
855 | 499k | |
856 | 499k | if (!F.hasFnAttribute(Attribute::SafeStack)) { |
857 | 499k | LLVM_DEBUG(dbgs() << "[SafeStack] safestack is not requested" |
858 | 499k | " for this function\n"); |
859 | 499k | return false; |
860 | 499k | } |
861 | 187 | |
862 | 187 | if (F.isDeclaration()) { |
863 | 0 | LLVM_DEBUG(dbgs() << "[SafeStack] function definition" |
864 | 0 | " is not available\n"); |
865 | 0 | return false; |
866 | 0 | } |
867 | 187 | |
868 | 187 | TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>(); |
869 | 187 | auto *TL = TM->getSubtargetImpl(F)->getTargetLowering(); |
870 | 187 | if (!TL) |
871 | 0 | report_fatal_error("TargetLowering instance is required"); |
872 | 187 | |
873 | 187 | auto *DL = &F.getParent()->getDataLayout(); |
874 | 187 | auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); |
875 | 187 | auto &ACT = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); |
876 | 187 | |
877 | 187 | // Compute DT and LI only for functions that have the attribute. |
878 | 187 | // This is only useful because the legacy pass manager doesn't let us |
879 | 187 | // compute analyzes lazily. |
880 | 187 | // In the backend pipeline, nothing preserves DT before SafeStack, so we |
881 | 187 | // would otherwise always compute it wastefully, even if there is no |
882 | 187 | // function with the safestack attribute. |
883 | 187 | DominatorTree DT(F); |
884 | 187 | LoopInfo LI(DT); |
885 | 187 | |
886 | 187 | ScalarEvolution SE(F, TLI, ACT, DT, LI); |
887 | 187 | |
888 | 187 | return SafeStack(F, *TL, *DL, SE).run(); |
889 | 187 | } |
890 | | }; |
891 | | |
892 | | } // end anonymous namespace |
893 | | |
// Unique pass identifier; the legacy pass manager keys off this address.
char SafeStackLegacyPass::ID = 0;

// Register the pass (and its TargetPassConfig dependency) with the legacy
// pass registry so -safe-stack and initializeSafeStackLegacyPassPass work.
INITIALIZE_PASS_BEGIN(SafeStackLegacyPass, DEBUG_TYPE,
                      "Safe Stack instrumentation pass", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(SafeStackLegacyPass, DEBUG_TYPE,
                    "Safe Stack instrumentation pass", false, false)
901 | | |
902 | 36.3k | FunctionPass *llvm::createSafeStackPass() { return new SafeStackLegacyPass(); } |