/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/lib/Transforms/Instrumentation/EfficiencySanitizer.cpp
Line | Count | Source |
1 | | //===-- EfficiencySanitizer.cpp - performance tuner -----------------------===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | // |
9 | | // This file is a part of EfficiencySanitizer, a family of performance tuners |
10 | | // that detects multiple performance issues via separate sub-tools. |
11 | | // |
12 | | // The instrumentation phase is straightforward: |
13 | | // - Take action on every memory access: either inlined instrumentation, |
14 | | // or inserted calls to our run-time library. |
15 | | // - Optimizations may be applied to avoid instrumenting some of the accesses. |
16 | | // - Turn mem{set,cpy,move} intrinsics into library calls. |
17 | | // The rest is handled by the run-time library. |
18 | | //===----------------------------------------------------------------------===// |
19 | | |
20 | | #include "llvm/ADT/SmallString.h" |
21 | | #include "llvm/ADT/SmallVector.h" |
22 | | #include "llvm/ADT/Statistic.h" |
23 | | #include "llvm/ADT/StringExtras.h" |
24 | | #include "llvm/Analysis/TargetLibraryInfo.h" |
25 | | #include "llvm/Transforms/Utils/Local.h" |
26 | | #include "llvm/IR/Function.h" |
27 | | #include "llvm/IR/IRBuilder.h" |
28 | | #include "llvm/IR/IntrinsicInst.h" |
29 | | #include "llvm/IR/Module.h" |
30 | | #include "llvm/IR/Type.h" |
31 | | #include "llvm/Support/CommandLine.h" |
32 | | #include "llvm/Support/Debug.h" |
33 | | #include "llvm/Support/raw_ostream.h" |
34 | | #include "llvm/Transforms/Instrumentation.h" |
35 | | #include "llvm/Transforms/Utils/BasicBlockUtils.h" |
36 | | #include "llvm/Transforms/Utils/ModuleUtils.h" |
37 | | |
38 | | using namespace llvm; |
39 | | |
40 | | #define DEBUG_TYPE "esan" |
41 | | |
42 | | // The tool type must be just one of these ClTool* options, as the tools |
43 | | // cannot be combined due to shadow memory constraints. |
44 | | static cl::opt<bool> |
45 | | ClToolCacheFrag("esan-cache-frag", cl::init(false), |
46 | | cl::desc("Detect data cache fragmentation"), cl::Hidden); |
47 | | static cl::opt<bool> |
48 | | ClToolWorkingSet("esan-working-set", cl::init(false), |
49 | | cl::desc("Measure the working set size"), cl::Hidden); |
50 | | // Each new tool will get its own opt flag here. |
51 | | // These are converted to EfficiencySanitizerOptions for use |
52 | | // in the code. |
53 | | |
54 | | static cl::opt<bool> ClInstrumentLoadsAndStores( |
55 | | "esan-instrument-loads-and-stores", cl::init(true), |
56 | | cl::desc("Instrument loads and stores"), cl::Hidden); |
57 | | static cl::opt<bool> ClInstrumentMemIntrinsics( |
58 | | "esan-instrument-memintrinsics", cl::init(true), |
59 | | cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden); |
60 | | static cl::opt<bool> ClInstrumentFastpath( |
61 | | "esan-instrument-fastpath", cl::init(true), |
62 | | cl::desc("Instrument fastpath"), cl::Hidden); |
63 | | static cl::opt<bool> ClAuxFieldInfo( |
64 | | "esan-aux-field-info", cl::init(true), |
65 | | cl::desc("Generate binary with auxiliary struct field information"), |
66 | | cl::Hidden); |
67 | | |
68 | | // Experiments show that the performance difference can be 2x or more, |
69 | | // and accuracy loss is typically negligible, so we turn this on by default. |
70 | | static cl::opt<bool> ClAssumeIntraCacheLine( |
71 | | "esan-assume-intra-cache-line", cl::init(true), |
72 | | cl::desc("Assume each memory access touches just one cache line, for " |
73 | | "better performance but with a potential loss of accuracy."), |
74 | | cl::Hidden); |
75 | | |
76 | | STATISTIC(NumInstrumentedLoads, "Number of instrumented loads"); |
77 | | STATISTIC(NumInstrumentedStores, "Number of instrumented stores"); |
78 | | STATISTIC(NumFastpaths, "Number of instrumented fastpaths"); |
79 | | STATISTIC(NumAccessesWithIrregularSize, |
80 | | "Number of accesses with a size outside our targeted callout sizes"); |
81 | | STATISTIC(NumIgnoredStructs, "Number of ignored structs"); |
82 | | STATISTIC(NumIgnoredGEPs, "Number of ignored GEP instructions"); |
83 | | STATISTIC(NumInstrumentedGEPs, "Number of instrumented GEP instructions"); |
84 | | STATISTIC(NumAssumedIntraCacheLine, |
85 | | "Number of accesses assumed to be intra-cache-line"); |
86 | | |
87 | | static const uint64_t EsanCtorAndDtorPriority = 0; |
88 | | static const char *const EsanModuleCtorName = "esan.module_ctor"; |
89 | | static const char *const EsanModuleDtorName = "esan.module_dtor"; |
90 | | static const char *const EsanInitName = "__esan_init"; |
91 | | static const char *const EsanExitName = "__esan_exit"; |
92 | | |
93 | | // In some cases we need to specify the tool to the runtime before |
94 | | // the ctor is called, so we set a global variable. |
95 | | static const char *const EsanWhichToolName = "__esan_which_tool"; |
96 | | |
97 | | // We must keep these Shadow* constants consistent with the esan runtime. |
98 | | // FIXME: Try to place these shadow constants, the names of the __esan_* |
99 | | // interface functions, and the ToolType enum into a header shared between |
100 | | // llvm and compiler-rt. |
101 | | struct ShadowMemoryParams { |
102 | | uint64_t ShadowMask; |
103 | | uint64_t ShadowOffs[3]; |
104 | | }; |
105 | | |
106 | | static const ShadowMemoryParams ShadowParams47 = { |
107 | | 0x00000fffffffffffull, |
108 | | { |
109 | | 0x0000130000000000ull, 0x0000220000000000ull, 0x0000440000000000ull, |
110 | | }}; |
111 | | |
112 | | static const ShadowMemoryParams ShadowParams40 = { |
113 | | 0x0fffffffffull, |
114 | | { |
115 | | 0x1300000000ull, 0x2200000000ull, 0x4400000000ull, |
116 | | }}; |
117 | | |
118 | | // This array is indexed by the ToolType enum. |
119 | | static const int ShadowScale[] = { |
120 | | 0, // ESAN_None. |
121 | | 2, // ESAN_CacheFrag: 4B:1B, so 4 to 1 == >>2. |
122 | | 6, // ESAN_WorkingSet: 64B:1B, so 64 to 1 == >>6. |
123 | | }; |
124 | | |
125 | | // MaxStructCounterNameSize is a soft size limit to avoid insanely long |
126 | | // names for those extremely large structs. |
127 | | static const unsigned MaxStructCounterNameSize = 512; |
128 | | |
129 | | namespace { |
130 | | |
131 | | static EfficiencySanitizerOptions |
132 | 10 | OverrideOptionsFromCL(EfficiencySanitizerOptions Options) { |
133 | 10 | if (ClToolCacheFrag) |
134 | 0 | Options.ToolType = EfficiencySanitizerOptions::ESAN_CacheFrag; |
135 | 10 | else if (ClToolWorkingSet) |
136 | 2 | Options.ToolType = EfficiencySanitizerOptions::ESAN_WorkingSet; |
137 | 10 | |
138 | 10 | // Direct opt invocation with no params will have the default ESAN_None. |
139 | 10 | // We run the default tool in that case. |
140 | 10 | if (Options.ToolType == EfficiencySanitizerOptions::ESAN_None) |
141 | 1 | Options.ToolType = EfficiencySanitizerOptions::ESAN_CacheFrag; |
142 | 10 | |
143 | 10 | return Options; |
144 | 10 | } |
145 | | |
146 | | /// EfficiencySanitizer: instrument each module to find performance issues. |
147 | | class EfficiencySanitizer : public ModulePass { |
148 | | public: |
149 | | EfficiencySanitizer( |
150 | | const EfficiencySanitizerOptions &Opts = EfficiencySanitizerOptions()) |
151 | 10 | : ModulePass(ID), Options(OverrideOptionsFromCL(Opts)) {} |
152 | | StringRef getPassName() const override; |
153 | | void getAnalysisUsage(AnalysisUsage &AU) const override; |
154 | | bool runOnModule(Module &M) override; |
155 | | static char ID; |
156 | | |
157 | | private: |
158 | | bool initOnModule(Module &M); |
159 | | void initializeCallbacks(Module &M); |
160 | | bool shouldIgnoreStructType(StructType *StructTy); |
161 | | void createStructCounterName( |
162 | | StructType *StructTy, SmallString<MaxStructCounterNameSize> &NameStr); |
163 | | void createCacheFragAuxGV( |
164 | | Module &M, const DataLayout &DL, StructType *StructTy, |
165 | | GlobalVariable *&TypeNames, GlobalVariable *&Offsets, GlobalVariable *&Size); |
166 | | GlobalVariable *createCacheFragInfoGV(Module &M, const DataLayout &DL, |
167 | | Constant *UnitName); |
168 | | Constant *createEsanInitToolInfoArg(Module &M, const DataLayout &DL); |
169 | | void createDestructor(Module &M, Constant *ToolInfoArg); |
170 | | bool runOnFunction(Function &F, Module &M); |
171 | | bool instrumentLoadOrStore(Instruction *I, const DataLayout &DL); |
172 | | bool instrumentMemIntrinsic(MemIntrinsic *MI); |
173 | | bool instrumentGetElementPtr(Instruction *I, Module &M); |
174 | | bool insertCounterUpdate(Instruction *I, StructType *StructTy, |
175 | | unsigned CounterIdx); |
176 | 0 | unsigned getFieldCounterIdx(StructType *StructTy) { |
177 | 0 | return 0; |
178 | 0 | } |
179 | 0 | unsigned getArrayCounterIdx(StructType *StructTy) { |
180 | 0 | return StructTy->getNumElements(); |
181 | 0 | } |
182 | 0 | unsigned getStructCounterSize(StructType *StructTy) { |
183 | 0 | // The struct counter array includes: |
184 | 0 | // - one counter for each struct field, |
185 | 0 | // - one counter for the struct access within an array. |
186 | 0 | return (StructTy->getNumElements()/*field*/ + 1/*array*/); |
187 | 0 | } |
188 | | bool shouldIgnoreMemoryAccess(Instruction *I); |
189 | | int getMemoryAccessFuncIndex(Value *Addr, const DataLayout &DL); |
190 | | Value *appToShadow(Value *Shadow, IRBuilder<> &IRB); |
191 | | bool instrumentFastpath(Instruction *I, const DataLayout &DL, bool IsStore, |
192 | | Value *Addr, unsigned Alignment); |
193 | | // Each tool has its own fastpath routine: |
194 | | bool instrumentFastpathCacheFrag(Instruction *I, const DataLayout &DL, |
195 | | Value *Addr, unsigned Alignment); |
196 | | bool instrumentFastpathWorkingSet(Instruction *I, const DataLayout &DL, |
197 | | Value *Addr, unsigned Alignment); |
198 | | |
199 | | EfficiencySanitizerOptions Options; |
200 | | LLVMContext *Ctx; |
201 | | Type *IntptrTy; |
202 | | // Our slowpath involves callouts to the runtime library. |
203 | | // Access sizes are powers of two: 1, 2, 4, 8, 16. |
204 | | static const size_t NumberOfAccessSizes = 5; |
205 | | FunctionCallee EsanAlignedLoad[NumberOfAccessSizes]; |
206 | | FunctionCallee EsanAlignedStore[NumberOfAccessSizes]; |
207 | | FunctionCallee EsanUnalignedLoad[NumberOfAccessSizes]; |
208 | | FunctionCallee EsanUnalignedStore[NumberOfAccessSizes]; |
209 | | // For irregular sizes of any alignment: |
210 | | FunctionCallee EsanUnalignedLoadN, EsanUnalignedStoreN; |
211 | | FunctionCallee MemmoveFn, MemcpyFn, MemsetFn; |
212 | | Function *EsanCtorFunction; |
213 | | Function *EsanDtorFunction; |
214 | | // Remember the counter variable for each struct type to avoid |
215 | | // recomputing the variable name later during instrumentation. |
216 | | std::map<Type *, GlobalVariable *> StructTyMap; |
217 | | ShadowMemoryParams ShadowParams; |
218 | | }; |
219 | | } // namespace |
220 | | |
221 | | char EfficiencySanitizer::ID = 0; |
222 | 7.01k | INITIALIZE_PASS_BEGIN( |
223 | 7.01k | EfficiencySanitizer, "esan", |
224 | 7.01k | "EfficiencySanitizer: finds performance issues.", false, false) |
225 | 7.01k | INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) |
226 | 7.01k | INITIALIZE_PASS_END( |
227 | | EfficiencySanitizer, "esan", |
228 | | "EfficiencySanitizer: finds performance issues.", false, false) |
229 | | |
230 | 0 | StringRef EfficiencySanitizer::getPassName() const { |
231 | 0 | return "EfficiencySanitizer"; |
232 | 0 | } |
233 | | |
234 | 10 | void EfficiencySanitizer::getAnalysisUsage(AnalysisUsage &AU) const { |
235 | 10 | AU.addRequired<TargetLibraryInfoWrapperPass>(); |
236 | 10 | } |
237 | | |
238 | | ModulePass * |
239 | 7 | llvm::createEfficiencySanitizerPass(const EfficiencySanitizerOptions &Options) { |
240 | 7 | return new EfficiencySanitizer(Options); |
241 | 7 | } |
242 | | |
243 | 10 | void EfficiencySanitizer::initializeCallbacks(Module &M) { |
244 | 10 | IRBuilder<> IRB(M.getContext()); |
245 | 10 | // Initialize the callbacks. |
246 | 60 | for (size_t Idx = 0; Idx < NumberOfAccessSizes; ++Idx50 ) { |
247 | 50 | const unsigned ByteSize = 1U << Idx; |
248 | 50 | std::string ByteSizeStr = utostr(ByteSize); |
249 | 50 | // We'll inline the most common (i.e., aligned and frequent sizes) |
250 | 50 | // load + store instrumentation: these callouts are for the slowpath. |
251 | 50 | SmallString<32> AlignedLoadName("__esan_aligned_load" + ByteSizeStr); |
252 | 50 | EsanAlignedLoad[Idx] = M.getOrInsertFunction( |
253 | 50 | AlignedLoadName, IRB.getVoidTy(), IRB.getInt8PtrTy()); |
254 | 50 | SmallString<32> AlignedStoreName("__esan_aligned_store" + ByteSizeStr); |
255 | 50 | EsanAlignedStore[Idx] = M.getOrInsertFunction( |
256 | 50 | AlignedStoreName, IRB.getVoidTy(), IRB.getInt8PtrTy()); |
257 | 50 | SmallString<32> UnalignedLoadName("__esan_unaligned_load" + ByteSizeStr); |
258 | 50 | EsanUnalignedLoad[Idx] = M.getOrInsertFunction( |
259 | 50 | UnalignedLoadName, IRB.getVoidTy(), IRB.getInt8PtrTy()); |
260 | 50 | SmallString<32> UnalignedStoreName("__esan_unaligned_store" + ByteSizeStr); |
261 | 50 | EsanUnalignedStore[Idx] = M.getOrInsertFunction( |
262 | 50 | UnalignedStoreName, IRB.getVoidTy(), IRB.getInt8PtrTy()); |
263 | 50 | } |
264 | 10 | EsanUnalignedLoadN = M.getOrInsertFunction( |
265 | 10 | "__esan_unaligned_loadN", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy); |
266 | 10 | EsanUnalignedStoreN = M.getOrInsertFunction( |
267 | 10 | "__esan_unaligned_storeN", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy); |
268 | 10 | MemmoveFn = |
269 | 10 | M.getOrInsertFunction("memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), |
270 | 10 | IRB.getInt8PtrTy(), IntptrTy); |
271 | 10 | MemcpyFn = |
272 | 10 | M.getOrInsertFunction("memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), |
273 | 10 | IRB.getInt8PtrTy(), IntptrTy); |
274 | 10 | MemsetFn = |
275 | 10 | M.getOrInsertFunction("memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), |
276 | 10 | IRB.getInt32Ty(), IntptrTy); |
277 | 10 | } |
278 | | |
279 | 0 | bool EfficiencySanitizer::shouldIgnoreStructType(StructType *StructTy) { |
280 | 0 | if (StructTy == nullptr || StructTy->isOpaque() /* no struct body */) |
281 | 0 | return true; |
282 | 0 | return false; |
283 | 0 | } |
284 | | |
285 | | void EfficiencySanitizer::createStructCounterName( |
286 | 0 | StructType *StructTy, SmallString<MaxStructCounterNameSize> &NameStr) { |
287 | 0 | // Append NumFields and field type ids to avoid struct conflicts |
288 | 0 | // with the same name but different fields. |
289 | 0 | if (StructTy->hasName()) |
290 | 0 | NameStr += StructTy->getName(); |
291 | 0 | else |
292 | 0 | NameStr += "struct.anon"; |
293 | 0 | // We allow the actual size of the StructCounterName to be larger than |
294 | 0 | // MaxStructCounterNameSize and append $NumFields and at least one |
295 | 0 | // field type id. |
296 | 0 | // Append $NumFields. |
297 | 0 | NameStr += "$"; |
298 | 0 | Twine(StructTy->getNumElements()).toVector(NameStr); |
299 | 0 | // Append struct field type ids in the reverse order. |
300 | 0 | for (int i = StructTy->getNumElements() - 1; i >= 0; --i) { |
301 | 0 | NameStr += "$"; |
302 | 0 | Twine(StructTy->getElementType(i)->getTypeID()).toVector(NameStr); |
303 | 0 | if (NameStr.size() >= MaxStructCounterNameSize) |
304 | 0 | break; |
305 | 0 | } |
306 | 0 | if (StructTy->isLiteral()) { |
307 | 0 | // End with $ for literal struct. |
308 | 0 | NameStr += "$"; |
309 | 0 | } |
310 | 0 | } |
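
A minimal standalone sketch (not from the file above) of the naming scheme used by createStructCounterName, assuming plain std::string and placeholder numeric type ids, and omitting the MaxStructCounterNameSize cap and the trailing '$' for literal structs:

    #include <cstdio>
    #include <string>
    #include <vector>

    // Mimics the scheme: "<struct name>$<NumFields>$<type ids in reverse order>".
    // The "type ids" here are placeholders, not real llvm::Type::TypeID values.
    std::string counterName(const std::string &Name,
                            const std::vector<int> &FieldTypeIds) {
      std::string Out = Name + "$" + std::to_string(FieldTypeIds.size());
      // Field type ids are appended in reverse order, as in the pass.
      for (auto It = FieldTypeIds.rbegin(); It != FieldTypeIds.rend(); ++It)
        Out += "$" + std::to_string(*It);
      return Out;
    }

    int main() {
      // Prints "struct.Foo$3$13$11$11" for a hypothetical 3-field struct.
      printf("%s\n", counterName("struct.Foo", {11, 11, 13}).c_str());
      return 0;
    }
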
311 | | |
312 | | // Create global variables with auxiliary information (e.g., struct field size, |
313 | | // offset, and type name) for better user reports. |
314 | | void EfficiencySanitizer::createCacheFragAuxGV( |
315 | | Module &M, const DataLayout &DL, StructType *StructTy, |
316 | | GlobalVariable *&TypeName, GlobalVariable *&Offset, |
317 | 0 | GlobalVariable *&Size) { |
318 | 0 | auto *Int8PtrTy = Type::getInt8PtrTy(*Ctx); |
319 | 0 | auto *Int32Ty = Type::getInt32Ty(*Ctx); |
320 | 0 | // FieldTypeName. |
321 | 0 | auto *TypeNameArrayTy = ArrayType::get(Int8PtrTy, StructTy->getNumElements()); |
322 | 0 | TypeName = new GlobalVariable(M, TypeNameArrayTy, true, |
323 | 0 | GlobalVariable::InternalLinkage, nullptr); |
324 | 0 | SmallVector<Constant *, 16> TypeNameVec; |
325 | 0 | // FieldOffset. |
326 | 0 | auto *OffsetArrayTy = ArrayType::get(Int32Ty, StructTy->getNumElements()); |
327 | 0 | Offset = new GlobalVariable(M, OffsetArrayTy, true, |
328 | 0 | GlobalVariable::InternalLinkage, nullptr); |
329 | 0 | SmallVector<Constant *, 16> OffsetVec; |
330 | 0 | // FieldSize |
331 | 0 | auto *SizeArrayTy = ArrayType::get(Int32Ty, StructTy->getNumElements()); |
332 | 0 | Size = new GlobalVariable(M, SizeArrayTy, true, |
333 | 0 | GlobalVariable::InternalLinkage, nullptr); |
334 | 0 | SmallVector<Constant *, 16> SizeVec; |
335 | 0 | for (unsigned i = 0; i < StructTy->getNumElements(); ++i) { |
336 | 0 | Type *Ty = StructTy->getElementType(i); |
337 | 0 | std::string Str; |
338 | 0 | raw_string_ostream StrOS(Str); |
339 | 0 | Ty->print(StrOS); |
340 | 0 | TypeNameVec.push_back( |
341 | 0 | ConstantExpr::getPointerCast( |
342 | 0 | createPrivateGlobalForString(M, StrOS.str(), true), |
343 | 0 | Int8PtrTy)); |
344 | 0 | OffsetVec.push_back( |
345 | 0 | ConstantInt::get(Int32Ty, |
346 | 0 | DL.getStructLayout(StructTy)->getElementOffset(i))); |
347 | 0 | SizeVec.push_back(ConstantInt::get(Int32Ty, |
348 | 0 | DL.getTypeAllocSize(Ty))); |
349 | 0 | } |
350 | 0 | TypeName->setInitializer(ConstantArray::get(TypeNameArrayTy, TypeNameVec)); |
351 | 0 | Offset->setInitializer(ConstantArray::get(OffsetArrayTy, OffsetVec)); |
352 | 0 | Size->setInitializer(ConstantArray::get(SizeArrayTy, SizeVec)); |
353 | 0 | } |
354 | | |
355 | | // Create the global variable for the cache-fragmentation tool. |
356 | | GlobalVariable *EfficiencySanitizer::createCacheFragInfoGV( |
357 | 5 | Module &M, const DataLayout &DL, Constant *UnitName) { |
358 | 5 | assert(Options.ToolType == EfficiencySanitizerOptions::ESAN_CacheFrag); |
359 | 5 | |
360 | 5 | auto *Int8PtrTy = Type::getInt8PtrTy(*Ctx); |
361 | 5 | auto *Int8PtrPtrTy = Int8PtrTy->getPointerTo(); |
362 | 5 | auto *Int32Ty = Type::getInt32Ty(*Ctx); |
363 | 5 | auto *Int32PtrTy = Type::getInt32PtrTy(*Ctx); |
364 | 5 | auto *Int64Ty = Type::getInt64Ty(*Ctx); |
365 | 5 | auto *Int64PtrTy = Type::getInt64PtrTy(*Ctx); |
366 | 5 | // This structure should be kept consistent with the StructInfo struct |
367 | 5 | // in the runtime library. |
368 | 5 | // struct StructInfo { |
369 | 5 | // const char *StructName; |
370 | 5 | // u32 Size; |
371 | 5 | // u32 NumFields; |
372 | 5 | // u32 *FieldOffset; // auxiliary struct field info. |
373 | 5 | // u32 *FieldSize; // auxiliary struct field info. |
374 | 5 | // const char **FieldTypeName; // auxiliary struct field info. |
375 | 5 | // u64 *FieldCounters; |
376 | 5 | // u64 *ArrayCounter; |
377 | 5 | // }; |
378 | 5 | auto *StructInfoTy = |
379 | 5 | StructType::get(Int8PtrTy, Int32Ty, Int32Ty, Int32PtrTy, Int32PtrTy, |
380 | 5 | Int8PtrPtrTy, Int64PtrTy, Int64PtrTy); |
381 | 5 | auto *StructInfoPtrTy = StructInfoTy->getPointerTo(); |
382 | 5 | // This structure should be kept consistent with the CacheFragInfo struct |
383 | 5 | // in the runtime library. |
384 | 5 | // struct CacheFragInfo { |
385 | 5 | // const char *UnitName; |
386 | 5 | // u32 NumStructs; |
387 | 5 | // StructInfo *Structs; |
388 | 5 | // }; |
389 | 5 | auto *CacheFragInfoTy = StructType::get(Int8PtrTy, Int32Ty, StructInfoPtrTy); |
390 | 5 | |
391 | 5 | std::vector<StructType *> Vec = M.getIdentifiedStructTypes(); |
392 | 5 | unsigned NumStructs = 0; |
393 | 5 | SmallVector<Constant *, 16> Initializers; |
394 | 5 | |
395 | 5 | for (auto &StructTy : Vec) { |
396 | 0 | if (shouldIgnoreStructType(StructTy)) { |
397 | 0 | ++NumIgnoredStructs; |
398 | 0 | continue; |
399 | 0 | } |
400 | 0 | ++NumStructs; |
401 | 0 |
|
402 | 0 | // StructName. |
403 | 0 | SmallString<MaxStructCounterNameSize> CounterNameStr; |
404 | 0 | createStructCounterName(StructTy, CounterNameStr); |
405 | 0 | GlobalVariable *StructCounterName = createPrivateGlobalForString( |
406 | 0 | M, CounterNameStr, /*AllowMerging*/true); |
407 | 0 | |
408 | 0 | // Counters. |
409 | 0 | // We create the counter array with StructCounterName and weak linkage |
410 | 0 | // so that the structs with the same name and layout from different |
411 | 0 | // compilation units will be merged into one. |
412 | 0 | auto *CounterArrayTy = ArrayType::get(Int64Ty, |
413 | 0 | getStructCounterSize(StructTy)); |
414 | 0 | GlobalVariable *Counters = |
415 | 0 | new GlobalVariable(M, CounterArrayTy, false, |
416 | 0 | GlobalVariable::WeakAnyLinkage, |
417 | 0 | ConstantAggregateZero::get(CounterArrayTy), |
418 | 0 | CounterNameStr); |
419 | 0 | |
420 | 0 | // Remember the counter variable for each struct type. |
421 | 0 | StructTyMap.insert(std::pair<Type *, GlobalVariable *>(StructTy, Counters)); |
422 | 0 | |
423 | 0 | // We pass the field type name array, offset array, and size array to |
424 | 0 | // the runtime for better reporting. |
425 | 0 | GlobalVariable *TypeName = nullptr, *Offset = nullptr, *Size = nullptr; |
426 | 0 | if (ClAuxFieldInfo) |
427 | 0 | createCacheFragAuxGV(M, DL, StructTy, TypeName, Offset, Size); |
428 | 0 | |
429 | 0 | Constant *FieldCounterIdx[2]; |
430 | 0 | FieldCounterIdx[0] = ConstantInt::get(Int32Ty, 0); |
431 | 0 | FieldCounterIdx[1] = ConstantInt::get(Int32Ty, |
432 | 0 | getFieldCounterIdx(StructTy)); |
433 | 0 | Constant *ArrayCounterIdx[2]; |
434 | 0 | ArrayCounterIdx[0] = ConstantInt::get(Int32Ty, 0); |
435 | 0 | ArrayCounterIdx[1] = ConstantInt::get(Int32Ty, |
436 | 0 | getArrayCounterIdx(StructTy)); |
437 | 0 | Initializers.push_back(ConstantStruct::get( |
438 | 0 | StructInfoTy, |
439 | 0 | ConstantExpr::getPointerCast(StructCounterName, Int8PtrTy), |
440 | 0 | ConstantInt::get(Int32Ty, |
441 | 0 | DL.getStructLayout(StructTy)->getSizeInBytes()), |
442 | 0 | ConstantInt::get(Int32Ty, StructTy->getNumElements()), |
443 | 0 | Offset == nullptr ? ConstantPointerNull::get(Int32PtrTy) |
444 | 0 | : ConstantExpr::getPointerCast(Offset, Int32PtrTy), |
445 | 0 | Size == nullptr ? ConstantPointerNull::get(Int32PtrTy) |
446 | 0 | : ConstantExpr::getPointerCast(Size, Int32PtrTy), |
447 | 0 | TypeName == nullptr |
448 | 0 | ? ConstantPointerNull::get(Int8PtrPtrTy) |
449 | 0 | : ConstantExpr::getPointerCast(TypeName, Int8PtrPtrTy), |
450 | 0 | ConstantExpr::getGetElementPtr(CounterArrayTy, Counters, |
451 | 0 | FieldCounterIdx), |
452 | 0 | ConstantExpr::getGetElementPtr(CounterArrayTy, Counters, |
453 | 0 | ArrayCounterIdx))); |
454 | 0 | } |
455 | 5 | // Structs. |
456 | 5 | Constant *StructInfo; |
457 | 5 | if (NumStructs == 0) { |
458 | 5 | StructInfo = ConstantPointerNull::get(StructInfoPtrTy); |
459 | 5 | } else { |
460 | 0 | auto *StructInfoArrayTy = ArrayType::get(StructInfoTy, NumStructs); |
461 | 0 | StructInfo = ConstantExpr::getPointerCast( |
462 | 0 | new GlobalVariable(M, StructInfoArrayTy, false, |
463 | 0 | GlobalVariable::InternalLinkage, |
464 | 0 | ConstantArray::get(StructInfoArrayTy, Initializers)), |
465 | 0 | StructInfoPtrTy); |
466 | 0 | } |
467 | 5 | |
468 | 5 | auto *CacheFragInfoGV = new GlobalVariable( |
469 | 5 | M, CacheFragInfoTy, true, GlobalVariable::InternalLinkage, |
470 | 5 | ConstantStruct::get(CacheFragInfoTy, UnitName, |
471 | 5 | ConstantInt::get(Int32Ty, NumStructs), StructInfo)); |
472 | 5 | return CacheFragInfoGV; |
473 | 5 | } |
474 | | |
475 | | // Create the tool-specific argument passed to EsanInit and EsanExit. |
476 | | Constant *EfficiencySanitizer::createEsanInitToolInfoArg(Module &M, |
477 | 10 | const DataLayout &DL) { |
478 | 10 | // This structure contains tool-specific information about each compilation |
479 | 10 | // unit (module) and is passed to the runtime library. |
480 | 10 | GlobalVariable *ToolInfoGV = nullptr; |
481 | 10 | |
482 | 10 | auto *Int8PtrTy = Type::getInt8PtrTy(*Ctx); |
483 | 10 | // Compilation unit name. |
484 | 10 | auto *UnitName = ConstantExpr::getPointerCast( |
485 | 10 | createPrivateGlobalForString(M, M.getModuleIdentifier(), true), |
486 | 10 | Int8PtrTy); |
487 | 10 | |
488 | 10 | // Create the tool-specific variable. |
489 | 10 | if (Options.ToolType == EfficiencySanitizerOptions::ESAN_CacheFrag) |
490 | 5 | ToolInfoGV = createCacheFragInfoGV(M, DL, UnitName); |
491 | 10 | |
492 | 10 | if (ToolInfoGV != nullptr) |
493 | 5 | return ConstantExpr::getPointerCast(ToolInfoGV, Int8PtrTy); |
494 | 5 | |
495 | 5 | // Create a null pointer if no tool-specific variable was created. |
496 | 5 | return ConstantPointerNull::get(Int8PtrTy); |
497 | 5 | } |
498 | | |
499 | 10 | void EfficiencySanitizer::createDestructor(Module &M, Constant *ToolInfoArg) { |
500 | 10 | PointerType *Int8PtrTy = Type::getInt8PtrTy(*Ctx); |
501 | 10 | EsanDtorFunction = Function::Create(FunctionType::get(Type::getVoidTy(*Ctx), |
502 | 10 | false), |
503 | 10 | GlobalValue::InternalLinkage, |
504 | 10 | EsanModuleDtorName, &M); |
505 | 10 | ReturnInst::Create(*Ctx, BasicBlock::Create(*Ctx, "", EsanDtorFunction)); |
506 | 10 | IRBuilder<> IRB_Dtor(EsanDtorFunction->getEntryBlock().getTerminator()); |
507 | 10 | FunctionCallee EsanExit = |
508 | 10 | M.getOrInsertFunction(EsanExitName, IRB_Dtor.getVoidTy(), Int8PtrTy); |
509 | 10 | IRB_Dtor.CreateCall(EsanExit, {ToolInfoArg}); |
510 | 10 | appendToGlobalDtors(M, EsanDtorFunction, EsanCtorAndDtorPriority); |
511 | 10 | } |
512 | | |
513 | 10 | bool EfficiencySanitizer::initOnModule(Module &M) { |
514 | 10 | |
515 | 10 | Triple TargetTriple(M.getTargetTriple()); |
516 | 10 | if (TargetTriple.isMIPS64()) |
517 | 2 | ShadowParams = ShadowParams40; |
518 | 8 | else |
519 | 8 | ShadowParams = ShadowParams47; |
520 | 10 | |
521 | 10 | Ctx = &M.getContext(); |
522 | 10 | const DataLayout &DL = M.getDataLayout(); |
523 | 10 | IRBuilder<> IRB(M.getContext()); |
524 | 10 | IntegerType *OrdTy = IRB.getInt32Ty(); |
525 | 10 | PointerType *Int8PtrTy = Type::getInt8PtrTy(*Ctx); |
526 | 10 | IntptrTy = DL.getIntPtrType(M.getContext()); |
527 | 10 | // Create the variable passed to EsanInit and EsanExit. |
528 | 10 | Constant *ToolInfoArg = createEsanInitToolInfoArg(M, DL); |
529 | 10 | // Constructor |
530 | 10 | // We specify the tool type both in the EsanWhichToolName global |
531 | 10 | // and as an arg to the init routine as a sanity check. |
532 | 10 | std::tie(EsanCtorFunction, std::ignore) = createSanitizerCtorAndInitFunctions( |
533 | 10 | M, EsanModuleCtorName, EsanInitName, /*InitArgTypes=*/{OrdTy, Int8PtrTy}, |
534 | 10 | /*InitArgs=*/{ |
535 | 10 | ConstantInt::get(OrdTy, static_cast<int>(Options.ToolType)), |
536 | 10 | ToolInfoArg}); |
537 | 10 | appendToGlobalCtors(M, EsanCtorFunction, EsanCtorAndDtorPriority); |
538 | 10 | |
539 | 10 | createDestructor(M, ToolInfoArg); |
540 | 10 | |
541 | 10 | new GlobalVariable(M, OrdTy, true, |
542 | 10 | GlobalValue::WeakAnyLinkage, |
543 | 10 | ConstantInt::get(OrdTy, |
544 | 10 | static_cast<int>(Options.ToolType)), |
545 | 10 | EsanWhichToolName); |
546 | 10 | |
547 | 10 | return true; |
548 | 10 | } |
549 | | |
550 | 19 | Value *EfficiencySanitizer::appToShadow(Value *Shadow, IRBuilder<> &IRB) { |
551 | 19 | // Shadow = ((App & Mask) + Offs) >> Scale |
552 | 19 | Shadow = IRB.CreateAnd(Shadow, ConstantInt::get(IntptrTy, ShadowParams.ShadowMask)); |
553 | 19 | uint64_t Offs; |
554 | 19 | int Scale = ShadowScale[Options.ToolType]; |
555 | 19 | if (Scale <= 2) |
556 | 0 | Offs = ShadowParams.ShadowOffs[Scale]; |
557 | 19 | else |
558 | 19 | Offs = ShadowParams.ShadowOffs[0] << Scale; |
559 | 19 | Shadow = IRB.CreateAdd(Shadow, ConstantInt::get(IntptrTy, Offs)); |
560 | 19 | if (Scale > 0) |
561 | 19 | Shadow = IRB.CreateLShr(Shadow, Scale); |
562 | 19 | return Shadow; |
563 | 19 | } |
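
A minimal standalone sketch (not from the file above) of the same app-to-shadow arithmetic, assuming the ShadowParams47 constants and the ESAN_WorkingSet scale of 6 defined earlier; the application address below is hypothetical:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t Mask = 0x00000fffffffffffull;  // ShadowParams47.ShadowMask
      const uint64_t Offs = 0x0000130000000000ull;  // ShadowParams47.ShadowOffs[0]
      const int Scale = 6;                          // ESAN_WorkingSet: 64B app -> 1B shadow
      const uint64_t App = 0x00007f1234567890ull;   // hypothetical application address
      // Scale > 2, so the pass adds ShadowOffs[0] << Scale before the right shift;
      // this is equivalent to ((App & Mask) >> Scale) + ShadowOffs[0].
      const uint64_t Shadow = ((App & Mask) + (Offs << Scale)) >> Scale;
      printf("app %#llx -> shadow %#llx\n", (unsigned long long)App,
             (unsigned long long)Shadow);
      return 0;
    }
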
564 | | |
565 | 31 | bool EfficiencySanitizer::shouldIgnoreMemoryAccess(Instruction *I) { |
566 | 31 | if (Options.ToolType == EfficiencySanitizerOptions::ESAN_CacheFrag) { |
567 | 8 | // We'd like to know about cache fragmentation in vtable accesses and |
568 | 8 | // constant data references, so we do not currently ignore anything. |
569 | 8 | return false; |
570 | 23 | } else if (Options.ToolType == EfficiencySanitizerOptions::ESAN_WorkingSet) { |
571 | 23 | // TODO: the instrumentation disturbs the data layout on the stack, so we |
572 | 23 | // may want to add an option to ignore stack references (if we can |
573 | 23 | // distinguish them) to reduce overhead. |
574 | 23 | } |
575 | 31 | // TODO(bruening): future tools will be returning true for some cases. |
576 | 31 | return false23 ; |
577 | 31 | } |
578 | | |
579 | 10 | bool EfficiencySanitizer::runOnModule(Module &M) { |
580 | 10 | bool Res = initOnModule(M); |
581 | 10 | initializeCallbacks(M); |
582 | 335 | for (auto &F : M) { |
583 | 335 | Res |= runOnFunction(F, M); |
584 | 335 | } |
585 | 10 | return Res; |
586 | 10 | } |
587 | | |
588 | 335 | bool EfficiencySanitizer::runOnFunction(Function &F, Module &M) { |
589 | 335 | // This is required to prevent instrumenting the call to __esan_init from |
590 | 335 | // within the module constructor. |
591 | 335 | if (&F == EsanCtorFunction) |
592 | 10 | return false; |
593 | 325 | SmallVector<Instruction *, 8> LoadsAndStores; |
594 | 325 | SmallVector<Instruction *, 8> MemIntrinCalls; |
595 | 325 | SmallVector<Instruction *, 8> GetElementPtrs; |
596 | 325 | bool Res = false; |
597 | 325 | const DataLayout &DL = M.getDataLayout(); |
598 | 325 | const TargetLibraryInfo *TLI = |
599 | 325 | &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); |
600 | 325 | |
601 | 325 | for (auto &BB : F) { |
602 | 99 | for (auto &Inst : BB) { |
603 | 99 | if ((isa<LoadInst>(Inst) || isa<StoreInst>(Inst)71 || |
604 | 99 | isa<AtomicRMWInst>(Inst)68 || isa<AtomicCmpXchgInst>(Inst)68 ) && |
605 | 99 | !shouldIgnoreMemoryAccess(&Inst)31 ) |
606 | 31 | LoadsAndStores.push_back(&Inst); |
607 | 68 | else if (isa<MemIntrinsic>(Inst)) |
608 | 3 | MemIntrinCalls.push_back(&Inst); |
609 | 65 | else if (isa<GetElementPtrInst>(Inst)) |
610 | 0 | GetElementPtrs.push_back(&Inst); |
611 | 65 | else if (CallInst *CI = dyn_cast<CallInst>(&Inst)) |
612 | 20 | maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI); |
613 | 99 | } |
614 | 42 | } |
615 | 325 | |
616 | 325 | if (ClInstrumentLoadsAndStores) { |
617 | 325 | for (auto Inst : LoadsAndStores) { |
618 | 31 | Res |= instrumentLoadOrStore(Inst, DL); |
619 | 31 | } |
620 | 325 | } |
621 | 325 | |
622 | 325 | if (ClInstrumentMemIntrinsics) { |
623 | 325 | for (auto Inst : MemIntrinCalls) { |
624 | 3 | Res |= instrumentMemIntrinsic(cast<MemIntrinsic>(Inst)); |
625 | 3 | } |
626 | 325 | } |
627 | 325 | |
628 | 325 | if (Options.ToolType == EfficiencySanitizerOptions::ESAN_CacheFrag) { |
629 | 152 | for (auto Inst : GetElementPtrs) { |
630 | 0 | Res |= instrumentGetElementPtr(Inst, M); |
631 | 0 | } |
632 | 152 | } |
633 | 325 | |
634 | 325 | return Res; |
635 | 325 | } |
636 | | |
637 | | bool EfficiencySanitizer::instrumentLoadOrStore(Instruction *I, |
638 | 31 | const DataLayout &DL) { |
639 | 31 | IRBuilder<> IRB(I); |
640 | 31 | bool IsStore; |
641 | 31 | Value *Addr; |
642 | 31 | unsigned Alignment; |
643 | 31 | if (LoadInst *Load = dyn_cast<LoadInst>(I)) { |
644 | 28 | IsStore = false; |
645 | 28 | Alignment = Load->getAlignment(); |
646 | 28 | Addr = Load->getPointerOperand(); |
647 | 28 | } else if (StoreInst *3 Store3 = dyn_cast<StoreInst>(I)) { |
648 | 3 | IsStore = true; |
649 | 3 | Alignment = Store->getAlignment(); |
650 | 3 | Addr = Store->getPointerOperand(); |
651 | 3 | } else if (AtomicRMWInst *0 RMW0 = dyn_cast<AtomicRMWInst>(I)) { |
652 | 0 | IsStore = true; |
653 | 0 | Alignment = 0; |
654 | 0 | Addr = RMW->getPointerOperand(); |
655 | 0 | } else if (AtomicCmpXchgInst *Xchg = dyn_cast<AtomicCmpXchgInst>(I)) { |
656 | 0 | IsStore = true; |
657 | 0 | Alignment = 0; |
658 | 0 | Addr = Xchg->getPointerOperand(); |
659 | 0 | } else |
660 | 0 | llvm_unreachable("Unsupported mem access type"); |
661 | 31 | |
662 | 31 | Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType(); |
663 | 31 | const uint32_t TypeSizeBytes = DL.getTypeStoreSizeInBits(OrigTy) / 8; |
664 | 31 | FunctionCallee OnAccessFunc = nullptr; |
665 | 31 | |
666 | 31 | // Convert 0 to the default alignment. |
667 | 31 | if (Alignment == 0) |
668 | 0 | Alignment = DL.getPrefTypeAlignment(OrigTy); |
669 | 31 | |
670 | 31 | if (IsStore) |
671 | 3 | NumInstrumentedStores++; |
672 | 28 | else |
673 | 28 | NumInstrumentedLoads++; |
674 | 31 | int Idx = getMemoryAccessFuncIndex(Addr, DL); |
675 | 31 | if (Idx < 0) { |
676 | 0 | OnAccessFunc = IsStore ? EsanUnalignedStoreN : EsanUnalignedLoadN; |
677 | 0 | IRB.CreateCall(OnAccessFunc, |
678 | 0 | {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()), |
679 | 0 | ConstantInt::get(IntptrTy, TypeSizeBytes)}); |
680 | 31 | } else { |
681 | 31 | if (ClInstrumentFastpath && |
682 | 31 | instrumentFastpath(I, DL, IsStore, Addr, Alignment)) { |
683 | 27 | NumFastpaths++; |
684 | 27 | return true; |
685 | 27 | } |
686 | 4 | if (Alignment == 0 || (Alignment % TypeSizeBytes) == 0) |
687 | 0 | OnAccessFunc = IsStore ? EsanAlignedStore[Idx] : EsanAlignedLoad[Idx]; |
688 | 4 | else |
689 | 4 | OnAccessFunc = IsStore ? EsanUnalignedStore[Idx]0 : EsanUnalignedLoad[Idx]; |
690 | 4 | IRB.CreateCall(OnAccessFunc, |
691 | 4 | IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy())); |
692 | 4 | } |
693 | 31 | return true4 ; |
694 | 31 | } |
695 | | |
696 | | // It's simplest to replace the memset/memmove/memcpy intrinsics with |
697 | | // calls that the runtime library intercepts. |
698 | | // Our pass is late enough that calls should not turn back into intrinsics. |
699 | 3 | bool EfficiencySanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) { |
700 | 3 | IRBuilder<> IRB(MI); |
701 | 3 | bool Res = false; |
702 | 3 | if (isa<MemSetInst>(MI)) { |
703 | 1 | IRB.CreateCall( |
704 | 1 | MemsetFn, |
705 | 1 | {IRB.CreatePointerCast(MI->getArgOperand(0), IRB.getInt8PtrTy()), |
706 | 1 | IRB.CreateIntCast(MI->getArgOperand(1), IRB.getInt32Ty(), false), |
707 | 1 | IRB.CreateIntCast(MI->getArgOperand(2), IntptrTy, false)}); |
708 | 1 | MI->eraseFromParent(); |
709 | 1 | Res = true; |
710 | 2 | } else if (isa<MemTransferInst>(MI)) { |
711 | 2 | IRB.CreateCall( |
712 | 2 | isa<MemCpyInst>(MI) ? MemcpyFn1 : MemmoveFn1 , |
713 | 2 | {IRB.CreatePointerCast(MI->getArgOperand(0), IRB.getInt8PtrTy()), |
714 | 2 | IRB.CreatePointerCast(MI->getArgOperand(1), IRB.getInt8PtrTy()), |
715 | 2 | IRB.CreateIntCast(MI->getArgOperand(2), IntptrTy, false)}); |
716 | 2 | MI->eraseFromParent(); |
717 | 2 | Res = true; |
718 | 2 | } else |
719 | 2 | llvm_unreachable0 ("Unsupported mem intrinsic type"); |
720 | 3 | return Res; |
721 | 3 | } |
722 | | |
723 | 0 | bool EfficiencySanitizer::instrumentGetElementPtr(Instruction *I, Module &M) { |
724 | 0 | GetElementPtrInst *GepInst = dyn_cast<GetElementPtrInst>(I); |
725 | 0 | bool Res = false; |
726 | 0 | if (GepInst == nullptr || GepInst->getNumIndices() == 1) { |
727 | 0 | ++NumIgnoredGEPs; |
728 | 0 | return false; |
729 | 0 | } |
730 | 0 | Type *SourceTy = GepInst->getSourceElementType(); |
731 | 0 | StructType *StructTy = nullptr; |
732 | 0 | ConstantInt *Idx; |
733 | 0 | // Check if GEP calculates address from a struct array. |
734 | 0 | if (isa<StructType>(SourceTy)) { |
735 | 0 | StructTy = cast<StructType>(SourceTy); |
736 | 0 | Idx = dyn_cast<ConstantInt>(GepInst->getOperand(1)); |
737 | 0 | if ((Idx == nullptr || Idx->getSExtValue() != 0) && |
738 | 0 | !shouldIgnoreStructType(StructTy) && StructTyMap.count(StructTy) != 0) |
739 | 0 | Res |= insertCounterUpdate(I, StructTy, getArrayCounterIdx(StructTy)); |
740 | 0 | } |
741 | 0 | // Iterate over all indices (except the first and the last) within each GEP |
742 | 0 | // instruction to handle possible nested struct field address calculation. |
743 | 0 | for (unsigned i = 1; i < GepInst->getNumIndices(); ++i) { |
744 | 0 | SmallVector<Value *, 8> IdxVec(GepInst->idx_begin(), |
745 | 0 | GepInst->idx_begin() + i); |
746 | 0 | Type *Ty = GetElementPtrInst::getIndexedType(SourceTy, IdxVec); |
747 | 0 | unsigned CounterIdx = 0; |
748 | 0 | if (isa<ArrayType>(Ty)) { |
749 | 0 | ArrayType *ArrayTy = cast<ArrayType>(Ty); |
750 | 0 | StructTy = dyn_cast<StructType>(ArrayTy->getElementType()); |
751 | 0 | if (shouldIgnoreStructType(StructTy) || StructTyMap.count(StructTy) == 0) |
752 | 0 | continue; |
753 | 0 | // The last counter for struct array access. |
754 | 0 | CounterIdx = getArrayCounterIdx(StructTy); |
755 | 0 | } else if (isa<StructType>(Ty)) { |
756 | 0 | StructTy = cast<StructType>(Ty); |
757 | 0 | if (shouldIgnoreStructType(StructTy) || StructTyMap.count(StructTy) == 0) |
758 | 0 | continue; |
759 | 0 | // Get the StructTy's subfield index. |
760 | 0 | Idx = cast<ConstantInt>(GepInst->getOperand(i+1)); |
761 | 0 | assert(Idx->getSExtValue() >= 0 && |
762 | 0 | Idx->getSExtValue() < StructTy->getNumElements()); |
763 | 0 | CounterIdx = getFieldCounterIdx(StructTy) + Idx->getSExtValue(); |
764 | 0 | } |
765 | 0 | Res |= insertCounterUpdate(I, StructTy, CounterIdx); |
766 | 0 | } |
767 | 0 | if (Res) |
768 | 0 | ++NumInstrumentedGEPs; |
769 | 0 | else |
770 | 0 | ++NumIgnoredGEPs; |
771 | 0 | return Res; |
772 | 0 | } |
773 | | |
774 | | bool EfficiencySanitizer::insertCounterUpdate(Instruction *I, |
775 | | StructType *StructTy, |
776 | 0 | unsigned CounterIdx) { |
777 | 0 | GlobalVariable *CounterArray = StructTyMap[StructTy]; |
778 | 0 | if (CounterArray == nullptr) |
779 | 0 | return false; |
780 | 0 | IRBuilder<> IRB(I); |
781 | 0 | Constant *Indices[2]; |
782 | 0 | // Xref http://llvm.org/docs/LangRef.html#i-getelementptr and |
783 | 0 | // http://llvm.org/docs/GetElementPtr.html. |
784 | 0 | // The first index of the GEP instruction steps through the first operand, |
785 | 0 | // i.e., the array itself. |
786 | 0 | Indices[0] = ConstantInt::get(IRB.getInt32Ty(), 0); |
787 | 0 | // The second index is the index within the array. |
788 | 0 | Indices[1] = ConstantInt::get(IRB.getInt32Ty(), CounterIdx); |
789 | 0 | Constant *Counter = |
790 | 0 | ConstantExpr::getGetElementPtr( |
791 | 0 | ArrayType::get(IRB.getInt64Ty(), getStructCounterSize(StructTy)), |
792 | 0 | CounterArray, Indices); |
793 | 0 | Value *Load = IRB.CreateLoad(IRB.getInt64Ty(), Counter); |
794 | 0 | IRB.CreateStore(IRB.CreateAdd(Load, ConstantInt::get(IRB.getInt64Ty(), 1)), |
795 | 0 | Counter); |
796 | 0 | return true; |
797 | 0 | } |
798 | | |
799 | | int EfficiencySanitizer::getMemoryAccessFuncIndex(Value *Addr, |
800 | 31 | const DataLayout &DL) { |
801 | 31 | Type *OrigPtrTy = Addr->getType(); |
802 | 31 | Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType(); |
803 | 31 | assert(OrigTy->isSized()); |
804 | 31 | // The size is always a multiple of 8. |
805 | 31 | uint32_t TypeSizeBytes = DL.getTypeStoreSizeInBits(OrigTy) / 8; |
806 | 31 | if (TypeSizeBytes != 1 && TypeSizeBytes != 229 && TypeSizeBytes != 425 && |
807 | 31 | TypeSizeBytes != 814 && TypeSizeBytes != 164 ) { |
808 | 0 | // Irregular sizes do not have per-size call targets. |
809 | 0 | NumAccessesWithIrregularSize++; |
810 | 0 | return -1; |
811 | 0 | } |
812 | 31 | size_t Idx = countTrailingZeros(TypeSizeBytes); |
813 | 31 | assert(Idx < NumberOfAccessSizes); |
814 | 31 | return Idx; |
815 | 31 | } |
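
Since the supported sizes are powers of two (1, 2, 4, 8, 16 bytes), the callout index is just the number of trailing zero bits. A small standalone illustration, using __builtin_ctz in place of llvm::countTrailingZeros:

    #include <cstdio>

    int main() {
      // Mirrors countTrailingZeros(TypeSizeBytes) above: sizes 1,2,4,8,16 map to
      // indices 0..4 (NumberOfAccessSizes == 5); any other size yields -1 in the
      // pass and is handled by the __esan_unaligned_{load,store}N callouts.
      const unsigned Sizes[] = {1, 2, 4, 8, 16};
      for (unsigned Size : Sizes)
        printf("size %2u -> Idx %d\n", Size, __builtin_ctz(Size));
      return 0;
    }
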
816 | | |
817 | | bool EfficiencySanitizer::instrumentFastpath(Instruction *I, |
818 | | const DataLayout &DL, bool IsStore, |
819 | 31 | Value *Addr, unsigned Alignment) { |
820 | 31 | if (Options.ToolType == EfficiencySanitizerOptions::ESAN_CacheFrag) { |
821 | 8 | return instrumentFastpathCacheFrag(I, DL, Addr, Alignment); |
822 | 23 | } else if (Options.ToolType == EfficiencySanitizerOptions::ESAN_WorkingSet) { |
823 | 23 | return instrumentFastpathWorkingSet(I, DL, Addr, Alignment); |
824 | 23 | } |
825 | 0 | return false; |
826 | 0 | } |
827 | | |
828 | | bool EfficiencySanitizer::instrumentFastpathCacheFrag(Instruction *I, |
829 | | const DataLayout &DL, |
830 | | Value *Addr, |
831 | 8 | unsigned Alignment) { |
832 | 8 | // Do nothing. |
833 | 8 | return true; // Return true to avoid slowpath instrumentation. |
834 | 8 | } |
835 | | |
836 | | bool EfficiencySanitizer::instrumentFastpathWorkingSet( |
837 | 23 | Instruction *I, const DataLayout &DL, Value *Addr, unsigned Alignment) { |
838 | 23 | assert(ShadowScale[Options.ToolType] == 6); // The code below assumes this |
839 | 23 | IRBuilder<> IRB(I); |
840 | 23 | Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType(); |
841 | 23 | const uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy); |
842 | 23 | // Bail to the slowpath if the access might touch multiple cache lines. |
843 | 23 | // An access aligned to its size is guaranteed to be intra-cache-line. |
844 | 23 | // getMemoryAccessFuncIndex has already ruled out a size larger than 16 |
845 | 23 | // and thus larger than a cache line for platforms this tool targets |
846 | 23 | // (and our shadow memory setup assumes 64-byte cache lines). |
847 | 23 | assert(TypeSize <= 128); |
848 | 23 | if (!(TypeSize == 8 || |
849 | 23 | (Alignment % (TypeSize / 8)) == 021 )) { |
850 | 8 | if (ClAssumeIntraCacheLine) |
851 | 4 | ++NumAssumedIntraCacheLine; |
852 | 4 | else |
853 | 4 | return false; |
854 | 19 | } |
855 | 19 | |
856 | 19 | // We inline instrumentation to set the corresponding shadow bits for |
857 | 19 | // each cache line touched by the application. Here we handle a single |
858 | 19 | // load or store where we've already ruled out the possibility that it |
859 | 19 | // might touch more than one cache line and thus we simply update the |
860 | 19 | // shadow memory for a single cache line. |
861 | 19 | // Our shadow memory model is fine with races when manipulating shadow values. |
862 | 19 | // We generate the following code: |
863 | 19 | // |
864 | 19 | // const char BitMask = 0x81; |
865 | 19 | // char *ShadowAddr = appToShadow(AppAddr); |
866 | 19 | // if ((*ShadowAddr & BitMask) != BitMask) |
867 | 19 | // *ShadowAddr |= BitMask; |
868 | 19 | // |
869 | 19 | Value *AddrPtr = IRB.CreatePointerCast(Addr, IntptrTy); |
870 | 19 | Value *ShadowPtr = appToShadow(AddrPtr, IRB); |
871 | 19 | Type *ShadowTy = IntegerType::get(*Ctx, 8U); |
872 | 19 | Type *ShadowPtrTy = PointerType::get(ShadowTy, 0); |
873 | 19 | // The bottom bit is used for the current sampling period's working set. |
874 | 19 | // The top bit is used for the total working set. We set both on each |
875 | 19 | // memory access, if they are not already set. |
876 | 19 | Value *ValueMask = ConstantInt::get(ShadowTy, 0x81); // 10000001B |
877 | 19 | |
878 | 19 | Value *OldValue = |
879 | 19 | IRB.CreateLoad(ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy)); |
880 | 19 | // The AND and CMP will be turned into a TEST instruction by the compiler. |
881 | 19 | Value *Cmp = IRB.CreateICmpNE(IRB.CreateAnd(OldValue, ValueMask), ValueMask); |
882 | 19 | Instruction *CmpTerm = SplitBlockAndInsertIfThen(Cmp, I, false); |
883 | 19 | // FIXME: do I need to call SetCurrentDebugLocation? |
884 | 19 | IRB.SetInsertPoint(CmpTerm); |
885 | 19 | // We use OR to set the shadow bits to avoid corrupting the middle 6 bits, |
886 | 19 | // which are used by the runtime library. |
887 | 19 | Value *NewVal = IRB.CreateOr(OldValue, ValueMask); |
888 | 19 | IRB.CreateStore(NewVal, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy)); |
889 | 19 | IRB.SetInsertPoint(I); |
890 | 19 | |
891 | 19 | return true; |
892 | 19 | } |