Coverage Report

Created: 2019-07-24 05:18

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
//===- AddressSanitizer.cpp - memory error detector -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
// Details of the algorithm:
//  https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Instrumentation/AddressSanitizer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Comdat.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Utils/ASanStackFrameLayout.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iomanip>
#include <limits>
#include <memory>
#include <sstream>
#include <string>
#include <tuple>

using namespace llvm;

#define DEBUG_TYPE "asan"

static const uint64_t kDefaultShadowScale = 3;
static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
static const uint64_t kDynamicShadowSentinel =
    std::numeric_limits<uint64_t>::max();
static const uint64_t kSmallX86_64ShadowOffsetBase = 0x7FFFFFFF;  // < 2G.
static const uint64_t kSmallX86_64ShadowOffsetAlignMask = ~0xFFFULL;
static const uint64_t kLinuxKasan_ShadowOffset64 = 0xdffffc0000000000;
static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 44;
static const uint64_t kSystemZ_ShadowOffset64 = 1ULL << 52;
static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37;
static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36;
static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kNetBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kNetBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kNetBSDKasan_ShadowOffset64 = 0xdfff900000000000;
static const uint64_t kPS4CPU_ShadowOffset64 = 1ULL << 40;
static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
static const uint64_t kEmscriptenShadowOffset = 0;

static const uint64_t kMyriadShadowScale = 5;
static const uint64_t kMyriadMemoryOffset32 = 0x80000000ULL;
static const uint64_t kMyriadMemorySize32 = 0x20000000ULL;
static const uint64_t kMyriadTagShift = 29;
static const uint64_t kMyriadDDRTag = 4;
static const uint64_t kMyriadCacheBitMask32 = 0x40000000ULL;

// The shadow memory space is dynamically allocated.
static const uint64_t kWindowsShadowOffset64 = kDynamicShadowSentinel;

static const size_t kMinStackMallocSize = 1 << 6;   // 64B
static const size_t kMaxStackMallocSize = 1 << 16;  // 64K
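// Size-class note (an inference from these constants, not stated explicitly
// here): fake stack frames appear to be binned so that class N serves frames
// of up to kMinStackMallocSize << N bytes; kMaxAsanStackMallocSizeClass == 10
// below then tops out at 64 << 10 == 64K, matching kMaxStackMallocSize.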
static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;

static const char *const kAsanModuleCtorName = "asan.module_ctor";
static const char *const kAsanModuleDtorName = "asan.module_dtor";
static const uint64_t kAsanCtorAndDtorPriority = 1;
static const char *const kAsanReportErrorTemplate = "__asan_report_";
static const char *const kAsanRegisterGlobalsName = "__asan_register_globals";
static const char *const kAsanUnregisterGlobalsName =
    "__asan_unregister_globals";
static const char *const kAsanRegisterImageGlobalsName =
  "__asan_register_image_globals";
static const char *const kAsanUnregisterImageGlobalsName =
  "__asan_unregister_image_globals";
static const char *const kAsanRegisterElfGlobalsName =
  "__asan_register_elf_globals";
static const char *const kAsanUnregisterElfGlobalsName =
  "__asan_unregister_elf_globals";
static const char *const kAsanPoisonGlobalsName = "__asan_before_dynamic_init";
static const char *const kAsanUnpoisonGlobalsName = "__asan_after_dynamic_init";
static const char *const kAsanInitName = "__asan_init";
static const char *const kAsanVersionCheckNamePrefix =
    "__asan_version_mismatch_check_v";
static const char *const kAsanPtrCmp = "__sanitizer_ptr_cmp";
static const char *const kAsanPtrSub = "__sanitizer_ptr_sub";
static const char *const kAsanHandleNoReturnName = "__asan_handle_no_return";
static const int kMaxAsanStackMallocSizeClass = 10;
static const char *const kAsanStackMallocNameTemplate = "__asan_stack_malloc_";
static const char *const kAsanStackFreeNameTemplate = "__asan_stack_free_";
static const char *const kAsanGenPrefix = "___asan_gen_";
static const char *const kODRGenPrefix = "__odr_asan_gen_";
static const char *const kSanCovGenPrefix = "__sancov_gen_";
static const char *const kAsanSetShadowPrefix = "__asan_set_shadow_";
static const char *const kAsanPoisonStackMemoryName =
    "__asan_poison_stack_memory";
static const char *const kAsanUnpoisonStackMemoryName =
    "__asan_unpoison_stack_memory";

// The ASan version script has an __asan_* wildcard. The triple underscore
// prevents a linker (gold) warning about attempting to export a local symbol.
static const char *const kAsanGlobalsRegisteredFlagName =
    "___asan_globals_registered";

static const char *const kAsanOptionDetectUseAfterReturn =
    "__asan_option_detect_stack_use_after_return";

static const char *const kAsanShadowMemoryDynamicAddress =
    "__asan_shadow_memory_dynamic_address";

static const char *const kAsanAllocaPoison = "__asan_alloca_poison";
static const char *const kAsanAllocasUnpoison = "__asan_allocas_unpoison";

// Access sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;

static const unsigned kAllocaRzSize = 32;

// Command-line flags.

static cl::opt<bool> ClEnableKasan(
    "asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClRecover(
    "asan-recover",
    cl::desc("Enable recovery mode (continue-after-error)."),
    cl::Hidden, cl::init(false));

// This flag may need to be replaced with -f[no-]asan-reads.
static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
                                       cl::desc("instrument read instructions"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool> ClInstrumentWrites(
    "asan-instrument-writes", cl::desc("instrument write instructions"),
    cl::Hidden, cl::init(true));

static cl::opt<bool> ClInstrumentAtomics(
    "asan-instrument-atomics",
    cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
    cl::init(true));

static cl::opt<bool> ClAlwaysSlowPath(
    "asan-always-slow-path",
    cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClForceDynamicShadow(
    "asan-force-dynamic-shadow",
    cl::desc("Load shadow address into a local variable for each function"),
    cl::Hidden, cl::init(false));

static cl::opt<bool>
    ClWithIfunc("asan-with-ifunc",
                cl::desc("Access dynamic shadow through an ifunc global on "
                         "platforms that support this"),
                cl::Hidden, cl::init(true));

static cl::opt<bool> ClWithIfuncSuppressRemat(
    "asan-with-ifunc-suppress-remat",
    cl::desc("Suppress rematerialization of dynamic shadow address by passing "
             "it through inline asm in prologue."),
    cl::Hidden, cl::init(true));

// This flag limits the number of instructions to be instrumented
// in any given BB. Normally, this should be set to unlimited (INT_MAX),
// but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporarily
// set it to 10000.
static cl::opt<int> ClMaxInsnsToInstrumentPerBB(
    "asan-max-ins-per-bb", cl::init(10000),
    cl::desc("maximal number of instructions to instrument in any given BB"),
    cl::Hidden);

// This flag may need to be replaced with -f[no]asan-stack.
static cl::opt<bool> ClStack("asan-stack", cl::desc("Handle stack memory"),
                             cl::Hidden, cl::init(true));
static cl::opt<uint32_t> ClMaxInlinePoisoningSize(
    "asan-max-inline-poisoning-size",
    cl::desc(
        "Inline shadow poisoning for blocks up to the given size in bytes."),
    cl::Hidden, cl::init(64));

static cl::opt<bool> ClUseAfterReturn("asan-use-after-return",
                                      cl::desc("Check stack-use-after-return"),
                                      cl::Hidden, cl::init(true));

static cl::opt<bool> ClRedzoneByvalArgs("asan-redzone-byval-args",
                                        cl::desc("Create redzones for byval "
                                                 "arguments (extra copy "
                                                 "required)"), cl::Hidden,
                                        cl::init(true));

static cl::opt<bool> ClUseAfterScope("asan-use-after-scope",
                                     cl::desc("Check stack-use-after-scope"),
                                     cl::Hidden, cl::init(false));

// This flag may need to be replaced with -f[no]asan-globals.
static cl::opt<bool> ClGlobals("asan-globals",
                               cl::desc("Handle global objects"), cl::Hidden,
                               cl::init(true));

static cl::opt<bool> ClInitializers("asan-initialization-order",
                                    cl::desc("Handle C++ initializer order"),
                                    cl::Hidden, cl::init(true));

static cl::opt<bool> ClInvalidPointerPairs(
    "asan-detect-invalid-pointer-pair",
    cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClInvalidPointerCmp(
    "asan-detect-invalid-pointer-cmp",
    cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClInvalidPointerSub(
    "asan-detect-invalid-pointer-sub",
    cl::desc("Instrument - operations with pointer operands"), cl::Hidden,
    cl::init(false));

static cl::opt<unsigned> ClRealignStack(
    "asan-realign-stack",
    cl::desc("Realign stack to the value of this flag (power of two)"),
    cl::Hidden, cl::init(32));

static cl::opt<int> ClInstrumentationWithCallsThreshold(
    "asan-instrumentation-with-call-threshold",
    cl::desc(
        "If the function being instrumented contains more than "
        "this number of memory accesses, use callbacks instead of "
        "inline checks (-1 means never use callbacks)."),
    cl::Hidden, cl::init(7000));

static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
    "asan-memory-access-callback-prefix",
    cl::desc("Prefix for memory access callbacks"), cl::Hidden,
    cl::init("__asan_"));

static cl::opt<bool>
    ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas",
                               cl::desc("instrument dynamic allocas"),
                               cl::Hidden, cl::init(true));

static cl::opt<bool> ClSkipPromotableAllocas(
    "asan-skip-promotable-allocas",
    cl::desc("Do not instrument promotable allocas"), cl::Hidden,
    cl::init(true));

// These flags allow changing the shadow mapping. The shadow mapping looks like
//    Shadow = (Mem >> scale) + offset

static cl::opt<int> ClMappingScale("asan-mapping-scale",
                                   cl::desc("scale of asan shadow mapping"),
                                   cl::Hidden, cl::init(0));

static cl::opt<uint64_t>
    ClMappingOffset("asan-mapping-offset",
                    cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"),
                    cl::Hidden, cl::init(0));

// Optimization flags. Not user visible, used mostly for testing
// and benchmarking the tool.

static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"),
                           cl::Hidden, cl::init(true));

static cl::opt<bool> ClOptSameTemp(
    "asan-opt-same-temp", cl::desc("Instrument the same temp just once"),
    cl::Hidden, cl::init(true));

static cl::opt<bool> ClOptGlobals("asan-opt-globals",
                                  cl::desc("Don't instrument scalar globals"),
                                  cl::Hidden, cl::init(true));

static cl::opt<bool> ClOptStack(
    "asan-opt-stack", cl::desc("Don't instrument scalar stack variables"),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClDynamicAllocaStack(
    "asan-stack-dynamic-alloca",
    cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden,
    cl::init(true));

static cl::opt<uint32_t> ClForceExperiment(
    "asan-force-experiment",
    cl::desc("Force optimization experiment (for testing)"), cl::Hidden,
    cl::init(0));

static cl::opt<bool>
    ClUsePrivateAlias("asan-use-private-alias",
                      cl::desc("Use private aliases for global variables"),
                      cl::Hidden, cl::init(false));

static cl::opt<bool>
    ClUseOdrIndicator("asan-use-odr-indicator",
                      cl::desc("Use odr indicators to improve ODR reporting"),
                      cl::Hidden, cl::init(false));

static cl::opt<bool>
    ClUseGlobalsGC("asan-globals-live-support",
                   cl::desc("Use linker features to support dead "
                            "code stripping of globals"),
                   cl::Hidden, cl::init(true));

// This is on by default even though there is a bug in gold:
// https://sourceware.org/bugzilla/show_bug.cgi?id=19002
static cl::opt<bool>
    ClWithComdat("asan-with-comdat",
                 cl::desc("Place ASan constructors in comdat sections"),
                 cl::Hidden, cl::init(true));

// Debug flags.

static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
                            cl::init(0));

static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"),
                                 cl::Hidden, cl::init(0));

static cl::opt<std::string> ClDebugFunc("asan-debug-func", cl::Hidden,
                                        cl::desc("Debug func"));

static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"),
                               cl::Hidden, cl::init(-1));

static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"),
                               cl::Hidden, cl::init(-1));

STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOptimizedAccessesToGlobalVar,
          "Number of optimized accesses to global vars");
STATISTIC(NumOptimizedAccessesToStackVar,
          "Number of optimized accesses to stack vars");

namespace {

/// This struct defines the shadow mapping using the rule:
///   shadow = (mem >> Scale) ADD-or-OR Offset.
/// If InGlobal is true, then
///   extern char __asan_shadow[];
///   shadow = (mem >> Scale) + &__asan_shadow
struct ShadowMapping {
  int Scale;
  uint64_t Offset;
  bool OrShadowOffset;
  bool InGlobal;
};

} // end anonymous namespace
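
// Worked example (derived from the constants above and the logic below, shown
// here for orientation): on Linux x86-64 with the default Scale of 3,
// getShadowMapping() picks
//   Offset = kSmallX86_64ShadowOffsetBase &
//            (kSmallX86_64ShadowOffsetAlignMask << 3) == 0x7FFF8000,
// so the shadow byte for an application address A sits at
//   (A >> 3) + 0x7FFF8000
// and each shadow byte describes one 8-byte granule of application memory.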

static ShadowMapping getShadowMapping(Triple &TargetTriple, int LongSize,
                                      bool IsKasan) {
  bool IsAndroid = TargetTriple.isAndroid();
  bool IsIOS = TargetTriple.isiOS() || TargetTriple.isWatchOS();
  bool IsFreeBSD = TargetTriple.isOSFreeBSD();
  bool IsNetBSD = TargetTriple.isOSNetBSD();
  bool IsPS4CPU = TargetTriple.isPS4CPU();
  bool IsLinux = TargetTriple.isOSLinux();
  bool IsPPC64 = TargetTriple.getArch() == Triple::ppc64 ||
                 TargetTriple.getArch() == Triple::ppc64le;
  bool IsSystemZ = TargetTriple.getArch() == Triple::systemz;
  bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
  bool IsMIPS32 = TargetTriple.isMIPS32();
  bool IsMIPS64 = TargetTriple.isMIPS64();
  bool IsArmOrThumb = TargetTriple.isARM() || TargetTriple.isThumb();
  bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64;
  bool IsWindows = TargetTriple.isOSWindows();
  bool IsFuchsia = TargetTriple.isOSFuchsia();
  bool IsMyriad = TargetTriple.getVendor() == llvm::Triple::Myriad;
  bool IsEmscripten = TargetTriple.isOSEmscripten();

  ShadowMapping Mapping;

  Mapping.Scale = IsMyriad ? kMyriadShadowScale : kDefaultShadowScale;
  if (ClMappingScale.getNumOccurrences() > 0) {
    Mapping.Scale = ClMappingScale;
  }

  if (LongSize == 32) {
    if (IsAndroid)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsMIPS32)
      Mapping.Offset = kMIPS32_ShadowOffset32;
    else if (IsFreeBSD)
      Mapping.Offset = kFreeBSD_ShadowOffset32;
    else if (IsNetBSD)
      Mapping.Offset = kNetBSD_ShadowOffset32;
    else if (IsIOS)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsWindows)
      Mapping.Offset = kWindowsShadowOffset32;
    else if (IsEmscripten)
      Mapping.Offset = kEmscriptenShadowOffset;
    else if (IsMyriad) {
      uint64_t ShadowOffset = (kMyriadMemoryOffset32 + kMyriadMemorySize32 -
                               (kMyriadMemorySize32 >> Mapping.Scale));
      Mapping.Offset = ShadowOffset - (kMyriadMemoryOffset32 >> Mapping.Scale);
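      // Plugging in the Myriad constants (Scale == 5):
      //   ShadowOffset   = 0x80000000 + 0x20000000 - 0x01000000 = 0x9F000000
      //   Mapping.Offset = 0x9F000000 - 0x04000000 = 0x9B000000
      // i.e. the 512M region at 0x80000000 shadows onto its own top 16M,
      // [0x9F000000, 0xA0000000).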
    }
    else
      Mapping.Offset = kDefaultShadowOffset32;
  } else {  // LongSize == 64
    // Fuchsia is always PIE, which means that the beginning of the address
    // space is always available.
    if (IsFuchsia)
      Mapping.Offset = 0;
    else if (IsPPC64)
      Mapping.Offset = kPPC64_ShadowOffset64;
    else if (IsSystemZ)
      Mapping.Offset = kSystemZ_ShadowOffset64;
    else if (IsFreeBSD && !IsMIPS64)
      Mapping.Offset = kFreeBSD_ShadowOffset64;
    else if (IsNetBSD) {
      if (IsKasan)
        Mapping.Offset = kNetBSDKasan_ShadowOffset64;
      else
        Mapping.Offset = kNetBSD_ShadowOffset64;
    } else if (IsPS4CPU)
      Mapping.Offset = kPS4CPU_ShadowOffset64;
    else if (IsLinux && IsX86_64) {
      if (IsKasan)
        Mapping.Offset = kLinuxKasan_ShadowOffset64;
      else
        Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
                          (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
    } else if (IsWindows && IsX86_64) {
      Mapping.Offset = kWindowsShadowOffset64;
    } else if (IsMIPS64)
      Mapping.Offset = kMIPS64_ShadowOffset64;
    else if (IsIOS)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsAArch64)
      Mapping.Offset = kAArch64_ShadowOffset64;
    else
      Mapping.Offset = kDefaultShadowOffset64;
  }

  if (ClForceDynamicShadow) {
    Mapping.Offset = kDynamicShadowSentinel;
  }

  if (ClMappingOffset.getNumOccurrences() > 0) {
    Mapping.Offset = ClMappingOffset;
  }

  // OR-ing the shadow offset is more efficient (at least on x86) than adding
  // it when the offset is a power of two, but on ppc64 we have to use add,
  // since the shadow offset is not necessarily 1/8-th of the address space.
  // On SystemZ, we could OR the constant in a single instruction, but it's
  // more efficient to load it once and use indexed addressing.
  Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS4CPU &&
                           !(Mapping.Offset & (Mapping.Offset - 1)) &&
                           Mapping.Offset != kDynamicShadowSentinel;
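  // For instance, kDefaultShadowOffset64 == 1ULL << 44 is a power of two, so
  // targets using it can fold the offset in with a single OR; the Linux
  // x86-64 offset 0x7FFF8000 is not, so those targets use ADD.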
  bool IsAndroidWithIfuncSupport =
      IsAndroid && !TargetTriple.isAndroidVersionLT(21);
  Mapping.InGlobal = ClWithIfunc && IsAndroidWithIfuncSupport && IsArmOrThumb;

  return Mapping;
}

static size_t RedzoneSizeForScale(int MappingScale) {
  // Redzone used for stack and globals is at least 32 bytes.
  // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively.
  return std::max(32U, 1U << MappingScale);
}

namespace {

/// Module analysis for getting various metadata about the module.
class ASanGlobalsMetadataWrapperPass : public ModulePass {
public:
  static char ID;

  ASanGlobalsMetadataWrapperPass() : ModulePass(ID) {
    initializeASanGlobalsMetadataWrapperPassPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override {
    GlobalsMD = GlobalsMetadata(M);
    return false;
  }

  StringRef getPassName() const override {
    return "ASanGlobalsMetadataWrapperPass";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
  }

  GlobalsMetadata &getGlobalsMD() { return GlobalsMD; }

private:
  GlobalsMetadata GlobalsMD;
};

char ASanGlobalsMetadataWrapperPass::ID = 0;

/// AddressSanitizer: instrument the code in module to find memory bugs.
struct AddressSanitizer {
  AddressSanitizer(Module &M, GlobalsMetadata &GlobalsMD,
                   bool CompileKernel = false, bool Recover = false,
                   bool UseAfterScope = false)
      : UseAfterScope(UseAfterScope || ClUseAfterScope), GlobalsMD(GlobalsMD) {
    this->Recover = ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover;
    this->CompileKernel =
        ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan : CompileKernel;

    C = &(M.getContext());
    LongSize = M.getDataLayout().getPointerSizeInBits();
    IntptrTy = Type::getIntNTy(*C, LongSize);
    TargetTriple = Triple(M.getTargetTriple());

    Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);
  }

  uint64_t getAllocaSizeInBytes(const AllocaInst &AI) const {
    uint64_t ArraySize = 1;
    if (AI.isArrayAllocation()) {
      const ConstantInt *CI = dyn_cast<ConstantInt>(AI.getArraySize());
      assert(CI && "non-constant array size");
      ArraySize = CI->getZExtValue();
    }
    Type *Ty = AI.getAllocatedType();
    uint64_t SizeInBytes =
        AI.getModule()->getDataLayout().getTypeAllocSize(Ty);
    return SizeInBytes * ArraySize;
  }

  /// Check if we want (and can) handle this alloca.
  bool isInterestingAlloca(const AllocaInst &AI);

  /// If it is an interesting memory access, return the PointerOperand
  /// and set IsWrite/Alignment. Otherwise return nullptr.
  /// MaybeMask is an output parameter for the mask Value, if we're looking at a
  /// masked load/store.
  Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
                                   uint64_t *TypeSize, unsigned *Alignment,
                                   Value **MaybeMask = nullptr);

  void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, Instruction *I,
                     bool UseCalls, const DataLayout &DL);
  void instrumentPointerComparisonOrSubtraction(Instruction *I);
  void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
                         Value *Addr, uint32_t TypeSize, bool IsWrite,
                         Value *SizeArgument, bool UseCalls, uint32_t Exp);
  void instrumentUnusualSizeOrAlignment(Instruction *I,
                                        Instruction *InsertBefore, Value *Addr,
                                        uint32_t TypeSize, bool IsWrite,
                                        Value *SizeArgument, bool UseCalls,
                                        uint32_t Exp);
  Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
                           Value *ShadowValue, uint32_t TypeSize);
  Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
                                 bool IsWrite, size_t AccessSizeIndex,
                                 Value *SizeArgument, uint32_t Exp);
  void instrumentMemIntrinsic(MemIntrinsic *MI);
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
  bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI);
  bool maybeInsertAsanInitAtFunctionEntry(Function &F);
  void maybeInsertDynamicShadowAtFunctionEntry(Function &F);
  void markEscapedLocalAllocas(Function &F);

private:
  friend struct FunctionStackPoisoner;

  void initializeCallbacks(Module &M);

  bool LooksLikeCodeInBug11395(Instruction *I);
  bool GlobalIsLinkerInitialized(GlobalVariable *G);
  bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr,
                    uint64_t TypeSize) const;

  /// Helper to cleanup per-function state.
  struct FunctionStateRAII {
    AddressSanitizer *Pass;

    FunctionStateRAII(AddressSanitizer *Pass) : Pass(Pass) {
      assert(Pass->ProcessedAllocas.empty() &&
             "last pass forgot to clear cache");
      assert(!Pass->LocalDynamicShadow);
    }

    ~FunctionStateRAII() {
      Pass->LocalDynamicShadow = nullptr;
      Pass->ProcessedAllocas.clear();
    }
  };

  LLVMContext *C;
  Triple TargetTriple;
  int LongSize;
  bool CompileKernel;
  bool Recover;
  bool UseAfterScope;
  Type *IntptrTy;
  ShadowMapping Mapping;
  FunctionCallee AsanHandleNoReturnFunc;
  FunctionCallee AsanPtrCmpFunction, AsanPtrSubFunction;
  Constant *AsanShadowGlobal;

  // These arrays are indexed by AccessIsWrite, Experiment and log2(AccessSize).
  FunctionCallee AsanErrorCallback[2][2][kNumberOfAccessSizes];
  FunctionCallee AsanMemoryAccessCallback[2][2][kNumberOfAccessSizes];

  // These arrays are indexed by AccessIsWrite and Experiment.
  FunctionCallee AsanErrorCallbackSized[2][2];
  FunctionCallee AsanMemoryAccessCallbackSized[2][2];

  FunctionCallee AsanMemmove, AsanMemcpy, AsanMemset;
  InlineAsm *EmptyAsm;
  Value *LocalDynamicShadow = nullptr;
  GlobalsMetadata GlobalsMD;
  DenseMap<const AllocaInst *, bool> ProcessedAllocas;
};

class AddressSanitizerLegacyPass : public FunctionPass {
public:
  static char ID;

  explicit AddressSanitizerLegacyPass(bool CompileKernel = false,
                                      bool Recover = false,
                                      bool UseAfterScope = false)
      : FunctionPass(ID), CompileKernel(CompileKernel), Recover(Recover),
        UseAfterScope(UseAfterScope) {
    initializeAddressSanitizerLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  StringRef getPassName() const override {
    return "AddressSanitizerFunctionPass";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<ASanGlobalsMetadataWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
  }

  bool runOnFunction(Function &F) override {
    GlobalsMetadata &GlobalsMD =
        getAnalysis<ASanGlobalsMetadataWrapperPass>().getGlobalsMD();
    const TargetLibraryInfo *TLI =
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
    AddressSanitizer ASan(*F.getParent(), GlobalsMD, CompileKernel, Recover,
                          UseAfterScope);
    return ASan.instrumentFunction(F, TLI);
  }

private:
  bool CompileKernel;
  bool Recover;
  bool UseAfterScope;
};

class ModuleAddressSanitizer {
public:
  ModuleAddressSanitizer(Module &M, GlobalsMetadata &GlobalsMD,
                         bool CompileKernel = false, bool Recover = false,
                         bool UseGlobalsGC = true, bool UseOdrIndicator = false)
      : GlobalsMD(GlobalsMD), UseGlobalsGC(UseGlobalsGC && ClUseGlobalsGC),
        // Enable aliases as they should have no downside with ODR indicators.
        UsePrivateAlias(UseOdrIndicator || ClUsePrivateAlias),
        UseOdrIndicator(UseOdrIndicator || ClUseOdrIndicator),
        // Not a typo: ClWithComdat is almost completely pointless without
        // ClUseGlobalsGC (because then it only works on modules without
        // globals, which are rare); it is a prerequisite for ClUseGlobalsGC;
        // and both suffer from gold PR19002, for which the UseGlobalsGC
        // constructor argument is designed as a workaround. Therefore, disable
        // both ClWithComdat and ClUseGlobalsGC unless the frontend says it's
        // ok to do globals-gc.
        UseCtorComdat(UseGlobalsGC && ClWithComdat) {
    this->Recover = ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover;
    this->CompileKernel =
        ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan : CompileKernel;

    C = &(M.getContext());
    int LongSize = M.getDataLayout().getPointerSizeInBits();
    IntptrTy = Type::getIntNTy(*C, LongSize);
    TargetTriple = Triple(M.getTargetTriple());
    Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);
  }

  bool instrumentModule(Module &);

private:
  void initializeCallbacks(Module &M);

  bool InstrumentGlobals(IRBuilder<> &IRB, Module &M, bool *CtorComdat);
  void InstrumentGlobalsCOFF(IRBuilder<> &IRB, Module &M,
                             ArrayRef<GlobalVariable *> ExtendedGlobals,
                             ArrayRef<Constant *> MetadataInitializers);
  void InstrumentGlobalsELF(IRBuilder<> &IRB, Module &M,
                            ArrayRef<GlobalVariable *> ExtendedGlobals,
                            ArrayRef<Constant *> MetadataInitializers,
                            const std::string &UniqueModuleId);
  void InstrumentGlobalsMachO(IRBuilder<> &IRB, Module &M,
                              ArrayRef<GlobalVariable *> ExtendedGlobals,
                              ArrayRef<Constant *> MetadataInitializers);
  void
  InstrumentGlobalsWithMetadataArray(IRBuilder<> &IRB, Module &M,
                                     ArrayRef<GlobalVariable *> ExtendedGlobals,
                                     ArrayRef<Constant *> MetadataInitializers);

  GlobalVariable *CreateMetadataGlobal(Module &M, Constant *Initializer,
                                       StringRef OriginalName);
  void SetComdatForGlobalMetadata(GlobalVariable *G, GlobalVariable *Metadata,
                                  StringRef InternalSuffix);
  IRBuilder<> CreateAsanModuleDtor(Module &M);

  bool ShouldInstrumentGlobal(GlobalVariable *G);
  bool ShouldUseMachOGlobalsSection() const;
  StringRef getGlobalMetadataSection() const;
  void poisonOneInitializer(Function &GlobalInit, GlobalValue *ModuleName);
  void createInitializerPoisonCalls(Module &M, GlobalValue *ModuleName);
  size_t MinRedzoneSizeForGlobal() const {
    return RedzoneSizeForScale(Mapping.Scale);
  }
  int GetAsanVersion(const Module &M) const;

  GlobalsMetadata GlobalsMD;
  bool CompileKernel;
  bool Recover;
  bool UseGlobalsGC;
  bool UsePrivateAlias;
  bool UseOdrIndicator;
  bool UseCtorComdat;
  Type *IntptrTy;
  LLVMContext *C;
  Triple TargetTriple;
  ShadowMapping Mapping;
  FunctionCallee AsanPoisonGlobals;
  FunctionCallee AsanUnpoisonGlobals;
  FunctionCallee AsanRegisterGlobals;
  FunctionCallee AsanUnregisterGlobals;
  FunctionCallee AsanRegisterImageGlobals;
  FunctionCallee AsanUnregisterImageGlobals;
  FunctionCallee AsanRegisterElfGlobals;
  FunctionCallee AsanUnregisterElfGlobals;

  Function *AsanCtorFunction = nullptr;
  Function *AsanDtorFunction = nullptr;
};

class ModuleAddressSanitizerLegacyPass : public ModulePass {
public:
  static char ID;

  explicit ModuleAddressSanitizerLegacyPass(bool CompileKernel = false,
                                            bool Recover = false,
                                            bool UseGlobalGC = true,
                                            bool UseOdrIndicator = false)
      : ModulePass(ID), CompileKernel(CompileKernel), Recover(Recover),
        UseGlobalGC(UseGlobalGC), UseOdrIndicator(UseOdrIndicator) {
    initializeModuleAddressSanitizerLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  StringRef getPassName() const override { return "ModuleAddressSanitizer"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<ASanGlobalsMetadataWrapperPass>();
  }

  bool runOnModule(Module &M) override {
    GlobalsMetadata &GlobalsMD =
        getAnalysis<ASanGlobalsMetadataWrapperPass>().getGlobalsMD();
    ModuleAddressSanitizer ASanModule(M, GlobalsMD, CompileKernel, Recover,
                                      UseGlobalGC, UseOdrIndicator);
    return ASanModule.instrumentModule(M);
  }

private:
  bool CompileKernel;
  bool Recover;
  bool UseGlobalGC;
  bool UseOdrIndicator;
};

// Stack poisoning does not play well with exception handling.
// When an exception is thrown, we essentially bypass the code
// that unpoisons the stack. This is why the run-time library has
// to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire
// stack in the interceptor. This however does not work inside the
// actual function which catches the exception. Most likely because the
// compiler hoists the load of the shadow value somewhere too high.
// This causes asan to report a non-existing bug on 453.povray.
// It sounds like an LLVM bug.
struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
  Function &F;
  AddressSanitizer &ASan;
  DIBuilder DIB;
  LLVMContext *C;
  Type *IntptrTy;
  Type *IntptrPtrTy;
  ShadowMapping Mapping;

  SmallVector<AllocaInst *, 16> AllocaVec;
  SmallVector<AllocaInst *, 16> StaticAllocasToMoveUp;
  SmallVector<Instruction *, 8> RetVec;
  unsigned StackAlignment;

  FunctionCallee AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1],
      AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1];
  FunctionCallee AsanSetShadowFunc[0x100] = {};
  FunctionCallee AsanPoisonStackMemoryFunc, AsanUnpoisonStackMemoryFunc;
  FunctionCallee AsanAllocaPoisonFunc, AsanAllocasUnpoisonFunc;

  // Stores the location and arguments of a poisoning/unpoisoning call for an
  // alloca.
  struct AllocaPoisonCall {
    IntrinsicInst *InsBefore;
    AllocaInst *AI;
    uint64_t Size;
    bool DoPoison;
  };
  SmallVector<AllocaPoisonCall, 8> DynamicAllocaPoisonCallVec;
  SmallVector<AllocaPoisonCall, 8> StaticAllocaPoisonCallVec;
  bool HasUntracedLifetimeIntrinsic = false;

  SmallVector<AllocaInst *, 1> DynamicAllocaVec;
  SmallVector<IntrinsicInst *, 1> StackRestoreVec;
  AllocaInst *DynamicAllocaLayout = nullptr;
  IntrinsicInst *LocalEscapeCall = nullptr;

  // Maps a Value to the AllocaInst from which the Value originates.
  using AllocaForValueMapTy = DenseMap<Value *, AllocaInst *>;
  AllocaForValueMapTy AllocaForValue;

  bool HasNonEmptyInlineAsm = false;
  bool HasReturnsTwiceCall = false;
  std::unique_ptr<CallInst> EmptyInlineAsm;

  FunctionStackPoisoner(Function &F, AddressSanitizer &ASan)
      : F(F), ASan(ASan), DIB(*F.getParent(), /*AllowUnresolved*/ false),
        C(ASan.C), IntptrTy(ASan.IntptrTy),
        IntptrPtrTy(PointerType::get(IntptrTy, 0)), Mapping(ASan.Mapping),
        StackAlignment(1 << Mapping.Scale),
        EmptyInlineAsm(CallInst::Create(ASan.EmptyAsm)) {}

  bool runOnFunction() {
    if (!ClStack) return false;

    if (ClRedzoneByvalArgs)
      copyArgsPassedByValToAllocas();

    // Collect alloca, ret, lifetime instructions etc.
    for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB);

    if (AllocaVec.empty() && DynamicAllocaVec.empty()) return false;

    initializeCallbacks(*F.getParent());

    if (HasUntracedLifetimeIntrinsic) {
      // If there are lifetime intrinsics which couldn't be traced back to an
      // alloca, we may not know exactly when a variable enters scope, and
      // therefore should "fail safe" by not poisoning them.
      StaticAllocaPoisonCallVec.clear();
      DynamicAllocaPoisonCallVec.clear();
    }

    processDynamicAllocas();
    processStaticAllocas();

    if (ClDebugStack) {
      LLVM_DEBUG(dbgs() << F);
    }
    return true;
  }

  // Arguments marked with the "byval" attribute are implicitly copied without
  // using an alloca instruction.  To produce redzones for those arguments, we
  // copy them a second time into memory allocated with an alloca instruction.
  void copyArgsPassedByValToAllocas();

  // Finds all Alloca instructions and puts
  // poisoned red zones around all of them.
  // Then unpoison everything back before the function returns.
  void processStaticAllocas();
  void processDynamicAllocas();

  void createDynamicAllocasInitStorage();

  // ----------------------- Visitors.
  /// Collect all Ret instructions.
  void visitReturnInst(ReturnInst &RI) { RetVec.push_back(&RI); }

  /// Collect all Resume instructions.
  void visitResumeInst(ResumeInst &RI) { RetVec.push_back(&RI); }

  /// Collect all CleanupReturnInst instructions.
  void visitCleanupReturnInst(CleanupReturnInst &CRI) { RetVec.push_back(&CRI); }

  void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore,
                                        Value *SavedStack) {
    IRBuilder<> IRB(InstBefore);
    Value *DynamicAreaPtr = IRB.CreatePtrToInt(SavedStack, IntptrTy);
    // When we insert _asan_allocas_unpoison before @llvm.stackrestore, we
    // need to adjust the extracted SP to compute the address of the most
    // recent alloca. We have a special @llvm.get.dynamic.area.offset
    // intrinsic for this purpose.
    if (!isa<ReturnInst>(InstBefore)) {
      Function *DynamicAreaOffsetFunc = Intrinsic::getDeclaration(
          InstBefore->getModule(), Intrinsic::get_dynamic_area_offset,
          {IntptrTy});

      Value *DynamicAreaOffset = IRB.CreateCall(DynamicAreaOffsetFunc, {});

      DynamicAreaPtr = IRB.CreateAdd(IRB.CreatePtrToInt(SavedStack, IntptrTy),
                                     DynamicAreaOffset);
    }

    IRB.CreateCall(
        AsanAllocasUnpoisonFunc,
        {IRB.CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr});
  }

  // Unpoison dynamic alloca redzones.
  void unpoisonDynamicAllocas() {
    for (auto &Ret : RetVec)
      unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout);

    for (auto &StackRestoreInst : StackRestoreVec)
      unpoisonDynamicAllocasBeforeInst(StackRestoreInst,
                                       StackRestoreInst->getOperand(0));
  }

  // Deploy and poison redzones around a dynamic alloca call. To do this, we
  // replace the call with another one with changed parameters and replace all
  // uses of the old address with the new one, so
  //   addr = alloca type, old_size, align
  // is replaced by
  //   new_size = (old_size + additional_size) * sizeof(type)
  //   tmp = alloca i8, new_size, max(align, 32)
  //   addr = tmp + 32 (first 32 bytes are for the left redzone).
  // The additional size is added so that the new allocation contains not only
  // the requested memory, but also the left, partial and right redzones.
  void handleDynamicAllocaCall(AllocaInst *AI);
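
  // Concretely (hypothetical IR, names and sizes illustrative only): with
  // kAllocaRzSize == 32,
  //   %addr = alloca i8, i64 %n
  // becomes, in effect,
  //   %sz   = <%n rounded up, plus left/partial/right redzone bytes>
  //   %tmp  = alloca i8, i64 %sz, align 32
  //   %addr = %tmp + 32   ; skip the 32-byte left redzone
  // and calls to the __asan_alloca_poison / __asan_allocas_unpoison runtime
  // functions declared above mark the redzones around the user region.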

  /// Collect Alloca instructions we want (and can) handle.
  void visitAllocaInst(AllocaInst &AI) {
    if (!ASan.isInterestingAlloca(AI)) {
      if (AI.isStaticAlloca()) {
        // Skip over allocas that are present *before* the first instrumented
        // alloca, we don't want to move those around.
        if (AllocaVec.empty())
          return;

        StaticAllocasToMoveUp.push_back(&AI);
      }
      return;
    }

    StackAlignment = std::max(StackAlignment, AI.getAlignment());
    if (!AI.isStaticAlloca())
      DynamicAllocaVec.push_back(&AI);
    else
      AllocaVec.push_back(&AI);
  }

  /// Collect lifetime intrinsic calls to check for use-after-scope
  /// errors.
  void visitIntrinsicInst(IntrinsicInst &II) {
    Intrinsic::ID ID = II.getIntrinsicID();
    if (ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&II);
    if (ID == Intrinsic::localescape) LocalEscapeCall = &II;
    if (!ASan.UseAfterScope)
      return;
    if (!II.isLifetimeStartOrEnd())
      return;
    // Found a lifetime intrinsic; add ASan instrumentation if necessary.
    ConstantInt *Size = dyn_cast<ConstantInt>(II.getArgOperand(0));
    // If the size argument is undefined, don't do anything.
    if (Size->isMinusOne()) return;
    // Check that the size doesn't saturate uint64_t and can
    // be stored in IntptrTy.
    const uint64_t SizeValue = Size->getValue().getLimitedValue();
    if (SizeValue == ~0ULL ||
        !ConstantInt::isValueValidForType(IntptrTy, SizeValue))
      return;
    // Find the alloca instruction that corresponds to the llvm.lifetime
    // argument.
    AllocaInst *AI =
        llvm::findAllocaForValue(II.getArgOperand(1), AllocaForValue);
    if (!AI) {
      HasUntracedLifetimeIntrinsic = true;
      return;
    }
    // We're interested only in allocas we can handle.
    if (!ASan.isInterestingAlloca(*AI))
      return;
    bool DoPoison = (ID == Intrinsic::lifetime_end);
    AllocaPoisonCall APC = {&II, AI, SizeValue, DoPoison};
    if (AI->isStaticAlloca())
      StaticAllocaPoisonCallVec.push_back(APC);
    else if (ClInstrumentDynamicAllocas)
      DynamicAllocaPoisonCallVec.push_back(APC);
  }
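
  // A typical pair recorded here looks like (IR sketch):
  //   call void @llvm.lifetime.start.p0i8(i64 4, i8* %p)  ; DoPoison = false
  //   ...
  //   call void @llvm.lifetime.end.p0i8(i64 4, i8* %p)    ; DoPoison = true
  // where %p must be traceable back to a single interesting alloca.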

  void visitCallSite(CallSite CS) {
    Instruction *I = CS.getInstruction();
    if (CallInst *CI = dyn_cast<CallInst>(I)) {
      HasNonEmptyInlineAsm |= CI->isInlineAsm() &&
                              !CI->isIdenticalTo(EmptyInlineAsm.get()) &&
                              I != ASan.LocalDynamicShadow;
      HasReturnsTwiceCall |= CI->canReturnTwice();
    }
  }

  // ---------------------- Helpers.
  void initializeCallbacks(Module &M);

  // Copies bytes from ShadowBytes into shadow memory for indexes where
  // ShadowMask is not zero. If ShadowMask[i] is zero, we assume that
  // ShadowBytes[i] is constantly zero and doesn't need to be overwritten.
  void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
                    IRBuilder<> &IRB, Value *ShadowBase);
  void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
                    size_t Begin, size_t End, IRBuilder<> &IRB,
                    Value *ShadowBase);
  void copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
                          ArrayRef<uint8_t> ShadowBytes, size_t Begin,
                          size_t End, IRBuilder<> &IRB, Value *ShadowBase);

  void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison);

  Value *createAllocaForLayout(IRBuilder<> &IRB, const ASanStackFrameLayout &L,
                               bool Dynamic);
  PHINode *createPHI(IRBuilder<> &IRB, Value *Cond, Value *ValueIfTrue,
                     Instruction *ThenTerm, Value *ValueIfFalse);
};

} // end anonymous namespace

void LocationMetadata::parse(MDNode *MDN) {
  assert(MDN->getNumOperands() == 3);
  MDString *DIFilename = cast<MDString>(MDN->getOperand(0));
  Filename = DIFilename->getString();
  LineNo = mdconst::extract<ConstantInt>(MDN->getOperand(1))->getLimitedValue();
  ColumnNo =
      mdconst::extract<ConstantInt>(MDN->getOperand(2))->getLimitedValue();
}

// FIXME: It would be cleaner to attach the relevant metadata to the globals
// we want to sanitize and read it on each pass over a function, instead of
// reading module-level metadata up front.
GlobalsMetadata::GlobalsMetadata(Module &M) {
  NamedMDNode *Globals = M.getNamedMetadata("llvm.asan.globals");
  if (!Globals)
    return;
  for (auto MDN : Globals->operands()) {
    // The metadata node contains the global and the fields of "Entry".
    assert(MDN->getNumOperands() == 5);
    auto *V = mdconst::extract_or_null<Constant>(MDN->getOperand(0));
    // The optimizer may optimize away a global entirely.
    if (!V)
      continue;
    auto *StrippedV = V->stripPointerCasts();
    auto *GV = dyn_cast<GlobalVariable>(StrippedV);
    if (!GV)
      continue;
    // We can already have an entry for GV if it was merged with another
    // global.
    Entry &E = Entries[GV];
    if (auto *Loc = cast_or_null<MDNode>(MDN->getOperand(1)))
      E.SourceLoc.parse(Loc);
    if (auto *Name = cast_or_null<MDString>(MDN->getOperand(2)))
      E.Name = Name->getString();
    ConstantInt *IsDynInit = mdconst::extract<ConstantInt>(MDN->getOperand(3));
    E.IsDynInit |= IsDynInit->isOne();
    ConstantInt *IsBlacklisted =
        mdconst::extract<ConstantInt>(MDN->getOperand(4));
    E.IsBlacklisted |= IsBlacklisted->isOne();
  }
}
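
// For reference, a matching metadata entry has the shape (IR sketch; field
// order as read above: global, source location, name, is_dyn_init,
// is_blacklisted):
//   !llvm.asan.globals = !{!0}
//   !0 = !{i32* @g, !1, !"g", i1 false, i1 false}
//   !1 = !{!"a.cpp", i32 2, i32 5}  ; filename, line, column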

AnalysisKey ASanGlobalsMetadataAnalysis::Key;

GlobalsMetadata ASanGlobalsMetadataAnalysis::run(Module &M,
                                                 ModuleAnalysisManager &AM) {
  return GlobalsMetadata(M);
}

AddressSanitizerPass::AddressSanitizerPass(bool CompileKernel, bool Recover,
                                           bool UseAfterScope)
    : CompileKernel(CompileKernel), Recover(Recover),
      UseAfterScope(UseAfterScope) {}

PreservedAnalyses AddressSanitizerPass::run(Function &F,
                                            AnalysisManager<Function> &AM) {
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  auto &MAM = MAMProxy.getManager();
  Module &M = *F.getParent();
  if (auto *R = MAM.getCachedResult<ASanGlobalsMetadataAnalysis>(M)) {
    const TargetLibraryInfo *TLI = &AM.getResult<TargetLibraryAnalysis>(F);
    AddressSanitizer Sanitizer(M, *R, CompileKernel, Recover, UseAfterScope);
    if (Sanitizer.instrumentFunction(F, TLI))
      return PreservedAnalyses::none();
    return PreservedAnalyses::all();
  }

  report_fatal_error(
      "The ASanGlobalsMetadataAnalysis is required to run before "
      "AddressSanitizer can run");
  return PreservedAnalyses::all();
}

ModuleAddressSanitizerPass::ModuleAddressSanitizerPass(bool CompileKernel,
                                                       bool Recover,
                                                       bool UseGlobalGC,
                                                       bool UseOdrIndicator)
    : CompileKernel(CompileKernel), Recover(Recover), UseGlobalGC(UseGlobalGC),
      UseOdrIndicator(UseOdrIndicator) {}

PreservedAnalyses ModuleAddressSanitizerPass::run(Module &M,
                                                  AnalysisManager<Module> &AM) {
  GlobalsMetadata &GlobalsMD = AM.getResult<ASanGlobalsMetadataAnalysis>(M);
  ModuleAddressSanitizer Sanitizer(M, GlobalsMD, CompileKernel, Recover,
                                   UseGlobalGC, UseOdrIndicator);
  if (Sanitizer.instrumentModule(M))
    return PreservedAnalyses::none();
  return PreservedAnalyses::all();
}

INITIALIZE_PASS(ASanGlobalsMetadataWrapperPass, "asan-globals-md",
                "Read metadata to mark which globals should be instrumented "
                "when running ASan.",
                false, true)

char AddressSanitizerLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(
    AddressSanitizerLegacyPass, "asan",
    "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false,
    false)
INITIALIZE_PASS_DEPENDENCY(ASanGlobalsMetadataWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(
    AddressSanitizerLegacyPass, "asan",
    "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false,
    false)

FunctionPass *llvm::createAddressSanitizerFunctionPass(bool CompileKernel,
                                                       bool Recover,
                                                       bool UseAfterScope) {
  assert(!CompileKernel || Recover);
  return new AddressSanitizerLegacyPass(CompileKernel, Recover, UseAfterScope);
}

char ModuleAddressSanitizerLegacyPass::ID = 0;

INITIALIZE_PASS(
    ModuleAddressSanitizerLegacyPass, "asan-module",
    "AddressSanitizer: detects use-after-free and out-of-bounds bugs."
    "ModulePass",
    false, false)

ModulePass *llvm::createModuleAddressSanitizerLegacyPassPass(
    bool CompileKernel, bool Recover, bool UseGlobalsGC, bool UseOdrIndicator) {
  assert(!CompileKernel || Recover);
  return new ModuleAddressSanitizerLegacyPass(CompileKernel, Recover,
                                              UseGlobalsGC, UseOdrIndicator);
}

static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
  size_t Res = countTrailingZeros(TypeSize / 8);
  assert(Res < kNumberOfAccessSizes);
  return Res;
}
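
// For example, a 32-bit access gives countTrailingZeros(32 / 8) == 2, so the
// five indices 0..4 correspond to 1-, 2-, 4-, 8- and 16-byte accesses
// (kNumberOfAccessSizes == 5).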
1233
1234

/// Create a global describing a source location.
static GlobalVariable *createPrivateGlobalForSourceLoc(Module &M,
                                                       LocationMetadata MD) {
  Constant *LocData[] = {
      createPrivateGlobalForString(M, MD.Filename, true, kAsanGenPrefix),
      ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.LineNo),
      ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.ColumnNo),
  };
  auto LocStruct = ConstantStruct::getAnon(LocData);
  auto GV = new GlobalVariable(M, LocStruct->getType(), true,
                               GlobalValue::PrivateLinkage, LocStruct,
                               kAsanGenPrefix);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  return GV;
}

/// Check if \p G has been created by a trusted compiler pass.
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G) {
  // Do not instrument @llvm.global_ctors, @llvm.used, etc.
  if (G->getName().startswith("llvm."))
    return true;

  // Do not instrument asan globals.
  if (G->getName().startswith(kAsanGenPrefix) ||
      G->getName().startswith(kSanCovGenPrefix) ||
      G->getName().startswith(kODRGenPrefix))
    return true;

  // Do not instrument gcov counter arrays.
  if (G->getName() == "__llvm_gcov_ctr")
    return true;

  return false;
}

Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
  // Shadow >> scale
  Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
  if (Mapping.Offset == 0) return Shadow;
  // (Shadow >> scale) | offset
  Value *ShadowBase;
  if (LocalDynamicShadow)
    ShadowBase = LocalDynamicShadow;
  else
    ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset);
  if (Mapping.OrShadowOffset)
    return IRB.CreateOr(Shadow, ShadowBase);
  else
    return IRB.CreateAdd(Shadow, ShadowBase);
}
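
// [Editor's note -- illustrative sketch, not part of AddressSanitizer.cpp.]
// For the common non-kernel x86-64 mapping (Scale = 3, additive Offset =
// 0x7fff8000 -- assumed values for illustration), memToShadow above computes:
static inline uint64_t exampleMemToShadow(uint64_t Addr) {
  // One shadow byte describes 8 application bytes.
  return (Addr >> 3) + 0x7fff8000ULL;
}
// E.g. Addr = 0x10000 -> shadow byte at 0x10000/8 + 0x7fff8000 = 0x7fffa000.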

// Instrument memset/memmove/memcpy
void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
  IRBuilder<> IRB(MI);
  if (isa<MemTransferInst>(MI)) {
    IRB.CreateCall(
        isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
        {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
         IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  } else if (isa<MemSetInst>(MI)) {
    IRB.CreateCall(
        AsanMemset,
        {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  }
  MI->eraseFromParent();
}
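
// [Editor's note -- illustrative, not part of AddressSanitizer.cpp.] The
// rewrite above replaces the intrinsic with the sanitizer runtime's checking
// version; in C-level terms (names per the callbacks referenced above):
//   memcpy(dst, src, n);   =>   __asan_memcpy(dst, src, n);
//   memmove(dst, src, n);  =>   __asan_memmove(dst, src, n);
//   memset(p, c, n);       =>   __asan_memset(p, c, n);
// The runtime functions range-check both operands before doing the copy/fill.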

/// Check if we want (and can) handle this alloca.
bool AddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
  auto PreviouslySeenAllocaInfo = ProcessedAllocas.find(&AI);

  if (PreviouslySeenAllocaInfo != ProcessedAllocas.end())
    return PreviouslySeenAllocaInfo->getSecond();

  bool IsInteresting =
      (AI.getAllocatedType()->isSized() &&
       // alloca() may be called with 0 size, ignore it.
       ((!AI.isStaticAlloca()) || getAllocaSizeInBytes(AI) > 0) &&
       // We are only interested in allocas not promotable to registers.
       // Promotable allocas are common under -O0.
       (!ClSkipPromotableAllocas || !isAllocaPromotable(&AI)) &&
       // inalloca allocas are not treated as static, and we don't want
       // dynamic alloca instrumentation for them as well.
       !AI.isUsedWithInAlloca() &&
       // swifterror allocas are register promoted by ISel
       !AI.isSwiftError());

  ProcessedAllocas[&AI] = IsInteresting;
  return IsInteresting;
}

Value *AddressSanitizer::isInterestingMemoryAccess(Instruction *I,
                                                   bool *IsWrite,
                                                   uint64_t *TypeSize,
                                                   unsigned *Alignment,
                                                   Value **MaybeMask) {
  // Skip memory accesses inserted by another instrumentation.
  if (I->getMetadata("nosanitize")) return nullptr;

  // Do not instrument the load fetching the dynamic shadow address.
  if (LocalDynamicShadow == I)
    return nullptr;

  Value *PtrOperand = nullptr;
  const DataLayout &DL = I->getModule()->getDataLayout();
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!ClInstrumentReads) return nullptr;
    *IsWrite = false;
    *TypeSize = DL.getTypeStoreSizeInBits(LI->getType());
    *Alignment = LI->getAlignment();
    PtrOperand = LI->getPointerOperand();
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!ClInstrumentWrites) return nullptr;
    *IsWrite = true;
    *TypeSize = DL.getTypeStoreSizeInBits(SI->getValueOperand()->getType());
    *Alignment = SI->getAlignment();
    PtrOperand = SI->getPointerOperand();
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    if (!ClInstrumentAtomics) return nullptr;
    *IsWrite = true;
    *TypeSize = DL.getTypeStoreSizeInBits(RMW->getValOperand()->getType());
    *Alignment = 0;
    PtrOperand = RMW->getPointerOperand();
  } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!ClInstrumentAtomics) return nullptr;
    *IsWrite = true;
    *TypeSize = DL.getTypeStoreSizeInBits(XCHG->getCompareOperand()->getType());
    *Alignment = 0;
    PtrOperand = XCHG->getPointerOperand();
  } else if (auto CI = dyn_cast<CallInst>(I)) {
    auto *F = dyn_cast<Function>(CI->getCalledValue());
    if (F && (F->getName().startswith("llvm.masked.load.") ||
              F->getName().startswith("llvm.masked.store."))) {
      unsigned OpOffset = 0;
      if (F->getName().startswith("llvm.masked.store.")) {
        if (!ClInstrumentWrites)
          return nullptr;
        // Masked store has an initial operand for the value.
        OpOffset = 1;
        *IsWrite = true;
      } else {
        if (!ClInstrumentReads)
          return nullptr;
        *IsWrite = false;
      }

      auto BasePtr = CI->getOperand(0 + OpOffset);
      auto Ty = cast<PointerType>(BasePtr->getType())->getElementType();
      *TypeSize = DL.getTypeStoreSizeInBits(Ty);
      if (auto AlignmentConstant =
              dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset)))
        *Alignment = (unsigned)AlignmentConstant->getZExtValue();
      else
        *Alignment = 1; // No alignment guarantees. We probably got Undef.
      if (MaybeMask)
        *MaybeMask = CI->getOperand(2 + OpOffset);
      PtrOperand = BasePtr;
    }
  }

  if (PtrOperand) {
    // Do not instrument accesses from different address spaces; we cannot deal
    // with them.
    Type *PtrTy = cast<PointerType>(PtrOperand->getType()->getScalarType());
    if (PtrTy->getPointerAddressSpace() != 0)
      return nullptr;

    // Ignore swifterror addresses.
    // swifterror memory addresses are mem2reg promoted by instruction
    // selection. As such they cannot have regular uses like an instrumentation
    // function and it makes no sense to track them as memory.
    if (PtrOperand->isSwiftError())
      return nullptr;
  }

  // Treat memory accesses to promotable allocas as non-interesting since they
  // will not cause memory violations. This greatly speeds up the instrumented
  // executable at -O0.
  if (ClSkipPromotableAllocas)
    if (auto AI = dyn_cast_or_null<AllocaInst>(PtrOperand))
      return isInterestingAlloca(*AI) ? AI : nullptr;

  return PtrOperand;
}

static bool isPointerOperand(Value *V) {
  return V->getType()->isPointerTy() || isa<PtrToIntInst>(V);
}

// This is a rough heuristic; it may cause both false positives and
// false negatives. The proper implementation requires cooperation with
// the frontend.
static bool isInterestingPointerComparison(Instruction *I) {
  if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) {
    if (!Cmp->isRelational())
      return false;
  } else {
    return false;
  }
  return isPointerOperand(I->getOperand(0)) &&
         isPointerOperand(I->getOperand(1));
}

// This is a rough heuristic; it may cause both false positives and
// false negatives. The proper implementation requires cooperation with
// the frontend.
static bool isInterestingPointerSubtraction(Instruction *I) {
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
    if (BO->getOpcode() != Instruction::Sub)
      return false;
  } else {
    return false;
  }
  return isPointerOperand(I->getOperand(0)) &&
         isPointerOperand(I->getOperand(1));
}

bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
  // If a global variable does not have dynamic initialization we don't
  // have to instrument it.  However, if a global does not have an initializer
  // at all, we assume it has a dynamic initializer (in another TU).
  //
  // FIXME: Metadata should be attached directly to the global instead
  // of being added to llvm.asan.globals.
  return G->hasInitializer() && !GlobalsMD.get(G).IsDynInit;
}

void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
    Instruction *I) {
  IRBuilder<> IRB(I);
  FunctionCallee F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
  Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
  for (Value *&i : Param) {
    if (i->getType()->isPointerTy())
      i = IRB.CreatePointerCast(i, IntptrTy);
  }
  IRB.CreateCall(F, Param);
}

static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
                                Instruction *InsertBefore, Value *Addr,
                                unsigned Alignment, unsigned Granularity,
                                uint32_t TypeSize, bool IsWrite,
                                Value *SizeArgument, bool UseCalls,
                                uint32_t Exp) {
  // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check
  // if the data is properly aligned.
  if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 ||
       TypeSize == 128) &&
      (Alignment >= Granularity || Alignment == 0 || Alignment >= TypeSize / 8))
    return Pass->instrumentAddress(I, InsertBefore, Addr, TypeSize, IsWrite,
                                   nullptr, UseCalls, Exp);
  Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeSize,
                                         IsWrite, nullptr, UseCalls, Exp);
}
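
// [Editor's note -- illustrative, not part of AddressSanitizer.cpp.] Why one
// check suffices on the fast path above: with 8-byte shadow granularity, a
// sufficiently aligned 1/2/4/8-byte access falls inside a single granule, so
// inspecting one shadow byte (widened for 16-byte accesses) covers the whole
// access. For example, an aligned 4-byte load at Addr only needs the shadow
// byte for Addr's granule. An unaligned or odd-sized access (say 10 bytes)
// may straddle granules, so it takes the instrumentUnusualSizeOrAlignment
// path below, which probes the first and last byte separately.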

static void instrumentMaskedLoadOrStore(AddressSanitizer *Pass,
                                        const DataLayout &DL, Type *IntptrTy,
                                        Value *Mask, Instruction *I,
                                        Value *Addr, unsigned Alignment,
                                        unsigned Granularity, uint32_t TypeSize,
                                        bool IsWrite, Value *SizeArgument,
                                        bool UseCalls, uint32_t Exp) {
  auto *VTy = cast<PointerType>(Addr->getType())->getElementType();
  uint64_t ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType());
  unsigned Num = VTy->getVectorNumElements();
  auto Zero = ConstantInt::get(IntptrTy, 0);
  for (unsigned Idx = 0; Idx < Num; ++Idx) {
    Value *InstrumentedAddress = nullptr;
    Instruction *InsertBefore = I;
    if (auto *Vector = dyn_cast<ConstantVector>(Mask)) {
      // dyn_cast as we might get UndefValue
      if (auto *Masked = dyn_cast<ConstantInt>(Vector->getOperand(Idx))) {
        if (Masked->isZero())
          // Mask is constant false, so no instrumentation needed.
          continue;
        // If we have a true or undef value, fall through to doInstrumentAddress
        // with InsertBefore == I
      }
    } else {
      IRBuilder<> IRB(I);
      Value *MaskElem = IRB.CreateExtractElement(Mask, Idx);
      Instruction *ThenTerm = SplitBlockAndInsertIfThen(MaskElem, I, false);
      InsertBefore = ThenTerm;
    }

    IRBuilder<> IRB(InsertBefore);
    InstrumentedAddress =
        IRB.CreateGEP(VTy, Addr, {Zero, ConstantInt::get(IntptrTy, Idx)});
    doInstrumentAddress(Pass, I, InsertBefore, InstrumentedAddress, Alignment,
                        Granularity, ElemTypeSize, IsWrite, SizeArgument,
                        UseCalls, Exp);
  }
}
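
// [Editor's note -- illustrative, not part of AddressSanitizer.cpp.] For a
// masked vector access, only enabled lanes are checked. Conceptually, a
// <4 x i32> masked load with constant mask <1,0,1,1> expands to:
//   for (unsigned Idx : {0, 2, 3})   // lanes with a true mask bit
//     check_4_bytes(&Base[Idx]);     // one doInstrumentAddress call per lane
// For a non-constant mask, each lane's check is instead guarded at run time
// by a branch on the extracted mask element, as emitted above.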

void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
                                     Instruction *I, bool UseCalls,
                                     const DataLayout &DL) {
  bool IsWrite = false;
  unsigned Alignment = 0;
  uint64_t TypeSize = 0;
  Value *MaybeMask = nullptr;
  Value *Addr =
      isInterestingMemoryAccess(I, &IsWrite, &TypeSize, &Alignment, &MaybeMask);
  assert(Addr);

  // Optimization experiments.
  // The experiments can be used to evaluate potential optimizations that remove
  // instrumentation (assess false negatives). Instead of completely removing
  // some instrumentation, you set Exp to a non-zero value (mask of optimization
  // experiments that want to remove instrumentation of this instruction).
  // If Exp is non-zero, this pass will emit special calls into runtime
  // (e.g. __asan_report_exp_load1 instead of __asan_report_load1). These calls
  // make the runtime terminate the program in a special way (with a different
  // exit status). Then you run the new compiler on a buggy corpus, collect
  // the special terminations (ideally, you don't see them at all -- no false
  // negatives) and make the decision on the optimization.
  uint32_t Exp = ClForceExperiment;

  if (ClOpt && ClOptGlobals) {
    // If initialization order checking is disabled, a simple access to a
    // dynamically initialized global is always valid.
    GlobalVariable *G = dyn_cast<GlobalVariable>(GetUnderlyingObject(Addr, DL));
    if (G && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
        isSafeAccess(ObjSizeVis, Addr, TypeSize)) {
      NumOptimizedAccessesToGlobalVar++;
      return;
    }
  }

  if (ClOpt && ClOptStack) {
    // A direct inbounds access to a stack variable is always valid.
    if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
        isSafeAccess(ObjSizeVis, Addr, TypeSize)) {
      NumOptimizedAccessesToStackVar++;
      return;
    }
  }

  if (IsWrite)
    NumInstrumentedWrites++;
  else
    NumInstrumentedReads++;

  unsigned Granularity = 1 << Mapping.Scale;
  if (MaybeMask) {
    instrumentMaskedLoadOrStore(this, DL, IntptrTy, MaybeMask, I, Addr,
                                Alignment, Granularity, TypeSize, IsWrite,
                                nullptr, UseCalls, Exp);
  } else {
    doInstrumentAddress(this, I, I, Addr, Alignment, Granularity, TypeSize,
                        IsWrite, nullptr, UseCalls, Exp);
  }
}

Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore,
                                                 Value *Addr, bool IsWrite,
                                                 size_t AccessSizeIndex,
                                                 Value *SizeArgument,
                                                 uint32_t Exp) {
  IRBuilder<> IRB(InsertBefore);
  Value *ExpVal = Exp == 0 ? nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp);
  CallInst *Call = nullptr;
  if (SizeArgument) {
    if (Exp == 0)
      Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][0],
                            {Addr, SizeArgument});
    else
      Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][1],
                            {Addr, SizeArgument, ExpVal});
  } else {
    if (Exp == 0)
      Call =
          IRB.CreateCall(AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
    else
      Call = IRB.CreateCall(AsanErrorCallback[IsWrite][1][AccessSizeIndex],
                            {Addr, ExpVal});
  }

  // We don't do Call->setDoesNotReturn() because the BB already has
  // UnreachableInst at the end.
  // This EmptyAsm is required to avoid callback merge.
  IRB.CreateCall(EmptyAsm, {});
  return Call;
}

Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
                                           Value *ShadowValue,
                                           uint32_t TypeSize) {
  size_t Granularity = static_cast<size_t>(1) << Mapping.Scale;
  // Addr & (Granularity - 1)
  Value *LastAccessedByte =
      IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
  // (Addr & (Granularity - 1)) + size - 1
  if (TypeSize / 8 > 1)
    LastAccessedByte = IRB.CreateAdd(
        LastAccessedByte, ConstantInt::get(IntptrTy, TypeSize / 8 - 1));
  // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
  LastAccessedByte =
      IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false);
  // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
  return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
}
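
// [Editor's note -- illustrative sketch, not part of AddressSanitizer.cpp.]
// Meaning of the slow-path comparison above: a non-zero shadow byte k in 1..7
// says "only the first k bytes of this granule are addressable", so an access
// is bad iff its last byte lands at or beyond offset k. Assuming 8-byte
// granularity (Scale = 3):
static inline bool exampleSlowPathIsPoisoned(uint64_t Addr, uint64_t Size,
                                             int8_t ShadowValue) {
  int8_t LastAccessedByte = (int8_t)((Addr & 7) + Size - 1);
  return LastAccessedByte >= ShadowValue;
}
// E.g. Addr & 7 == 5, Size == 2, ShadowValue == 6: last byte offset 6 >= 6,
// so the access touches the poisoned tail of the granule and is reported.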

void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
                                         Instruction *InsertBefore, Value *Addr,
                                         uint32_t TypeSize, bool IsWrite,
                                         Value *SizeArgument, bool UseCalls,
                                         uint32_t Exp) {
  bool IsMyriad = TargetTriple.getVendor() == llvm::Triple::Myriad;

  IRBuilder<> IRB(InsertBefore);
  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
  size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);

  if (UseCalls) {
    if (Exp == 0)
      IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex],
                     AddrLong);
    else
      IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
                     {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)});
    return;
  }

  if (IsMyriad) {
    // Strip the cache bit and do range check.
    // AddrLong &= ~kMyriadCacheBitMask32
    AddrLong = IRB.CreateAnd(AddrLong, ~kMyriadCacheBitMask32);
    // Tag = AddrLong >> kMyriadTagShift
    Value *Tag = IRB.CreateLShr(AddrLong, kMyriadTagShift);
    // Tag == kMyriadDDRTag
    Value *TagCheck =
        IRB.CreateICmpEQ(Tag, ConstantInt::get(IntptrTy, kMyriadDDRTag));

    Instruction *TagCheckTerm =
        SplitBlockAndInsertIfThen(TagCheck, InsertBefore, false,
                                  MDBuilder(*C).createBranchWeights(1, 100000));
    assert(cast<BranchInst>(TagCheckTerm)->isUnconditional());
    IRB.SetInsertPoint(TagCheckTerm);
    InsertBefore = TagCheckTerm;
  }

  Type *ShadowTy =
      IntegerType::get(*C, std::max(8U, TypeSize >> Mapping.Scale));
  Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
  Value *ShadowPtr = memToShadow(AddrLong, IRB);
  Value *CmpVal = Constant::getNullValue(ShadowTy);
  Value *ShadowValue =
      IRB.CreateLoad(ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));

  Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal);
  size_t Granularity = 1ULL << Mapping.Scale;
  Instruction *CrashTerm = nullptr;

  if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) {
    // We use branch weights for the slow path check, to indicate that the slow
    // path is rarely taken. This seems to be the case for SPEC benchmarks.
    Instruction *CheckTerm = SplitBlockAndInsertIfThen(
        Cmp, InsertBefore, false, MDBuilder(*C).createBranchWeights(1, 100000));
    assert(cast<BranchInst>(CheckTerm)->isUnconditional());
    BasicBlock *NextBB = CheckTerm->getSuccessor(0);
    IRB.SetInsertPoint(CheckTerm);
    Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize);
    if (Recover) {
      CrashTerm = SplitBlockAndInsertIfThen(Cmp2, CheckTerm, false);
    } else {
      BasicBlock *CrashBlock =
        BasicBlock::Create(*C, "", NextBB->getParent(), NextBB);
      CrashTerm = new UnreachableInst(*C, CrashBlock);
      BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2);
      ReplaceInstWithInst(CheckTerm, NewTerm);
    }
  } else {
    CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, !Recover);
  }

  Instruction *Crash = generateCrashCode(CrashTerm, AddrLong, IsWrite,
                                         AccessSizeIndex, SizeArgument, Exp);
  Crash->setDebugLoc(OrigIns->getDebugLoc());
}
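
// [Editor's note -- illustrative, not part of AddressSanitizer.cpp.] The full
// inline sequence emitted by instrumentAddress above corresponds roughly to:
//   int8_t shadow = *(int8_t *)MemToShadow(addr);
//   if (shadow != 0) {                       // fast path: granule fully OK?
//     if (((addr & 7) + size - 1) >= shadow) // slow path, sizes < granule
//       __asan_report_loadN(addr);           // or storeN; does not return
//   }
// When UseCalls is set, the whole branchy sequence is replaced by a single
// call to the __asan_loadN/__asan_storeN callback instead.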

// Instrument unusual size or unusual alignment.
// We cannot do this with a single check, so we do a 1-byte check for the first
// and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
// to report the actual access size.
void AddressSanitizer::instrumentUnusualSizeOrAlignment(
    Instruction *I, Instruction *InsertBefore, Value *Addr, uint32_t TypeSize,
    bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp) {
  IRBuilder<> IRB(InsertBefore);
  Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8);
  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
  if (UseCalls) {
    if (Exp == 0)
      IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][0],
                     {AddrLong, Size});
    else
      IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][1],
                     {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)});
  } else {
    Value *LastByte = IRB.CreateIntToPtr(
        IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)),
        Addr->getType());
    instrumentAddress(I, InsertBefore, Addr, 8, IsWrite, Size, false, Exp);
    instrumentAddress(I, InsertBefore, LastByte, 8, IsWrite, Size, false, Exp);
  }
}
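
// [Editor's note -- illustrative, not part of AddressSanitizer.cpp.] For an
// odd-sized access the two 1-byte probes above cover both ends: a 10-byte
// access at addr checks addr and addr + 9. Because redzones and poisoned
// regions are contiguous, any overlap with poisoned memory must include one
// of the two end bytes, so probing just the ends suffices; the report
// callback still receives the real size (__asan_report_*_n(addr, 10)) so the
// diagnostic can name the actual access width.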

void ModuleAddressSanitizer::poisonOneInitializer(Function &GlobalInit,
                                                  GlobalValue *ModuleName) {
  // Set up the arguments to our poison/unpoison functions.
  IRBuilder<> IRB(&GlobalInit.front(),
                  GlobalInit.front().getFirstInsertionPt());

  // Add a call to poison all external globals before the given function starts.
  Value *ModuleNameAddr = ConstantExpr::getPointerCast(ModuleName, IntptrTy);
  IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr);

  // Add calls to unpoison all globals before each return instruction.
  for (auto &BB : GlobalInit.getBasicBlockList())
    if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
      CallInst::Create(AsanUnpoisonGlobals, "", RI);
}

void ModuleAddressSanitizer::createInitializerPoisonCalls(
    Module &M, GlobalValue *ModuleName) {
  GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
  if (!GV)
    return;

  ConstantArray *CA = dyn_cast<ConstantArray>(GV->getInitializer());
  if (!CA)
    return;

  for (Use &OP : CA->operands()) {
    if (isa<ConstantAggregateZero>(OP)) continue;
    ConstantStruct *CS = cast<ConstantStruct>(OP);

    // Must have a function or null ptr.
    if (Function *F = dyn_cast<Function>(CS->getOperand(1))) {
      if (F->getName() == kAsanModuleCtorName) continue;
      ConstantInt *Priority = dyn_cast<ConstantInt>(CS->getOperand(0));
      // Don't instrument CTORs that will run before asan.module_ctor.
      if (Priority->getLimitedValue() <= kAsanCtorAndDtorPriority) continue;
      poisonOneInitializer(*F, ModuleName);
    }
  }
}

bool ModuleAddressSanitizer::ShouldInstrumentGlobal(GlobalVariable *G) {
  Type *Ty = G->getValueType();
  LLVM_DEBUG(dbgs() << "GLOBAL: " << *G << "\n");

  // FIXME: Metadata should be attached directly to the global instead
  // of being added to llvm.asan.globals.
  if (GlobalsMD.get(G).IsBlacklisted) return false;
  if (!Ty->isSized()) return false;
  if (!G->hasInitializer()) return false;
  if (GlobalWasGeneratedByCompiler(G)) return false; // Our own globals.
  // Two problems with thread-locals:
  //   - The address of the main thread's copy can't be computed at link-time.
  //   - Need to poison all copies, not just the main thread's one.
  if (G->isThreadLocal()) return false;
  // For now, just ignore this Global if the alignment is large.
  if (G->getAlignment() > MinRedzoneSizeForGlobal()) return false;

  // For non-COFF targets, only instrument globals known to be defined by this
  // TU.
  // FIXME: We can instrument comdat globals on ELF if we are using the
  // GC-friendly metadata scheme.
  if (!TargetTriple.isOSBinFormatCOFF()) {
    if (!G->hasExactDefinition() || G->hasComdat())
      return false;
  } else {
    // On COFF, don't instrument non-ODR linkages.
    if (G->isInterposable())
      return false;
  }

  // If a comdat is present, it must have a selection kind that implies ODR
  // semantics: no duplicates, any, or exact match.
  if (Comdat *C = G->getComdat()) {
    switch (C->getSelectionKind()) {
    case Comdat::Any:
    case Comdat::ExactMatch:
    case Comdat::NoDuplicates:
      break;
    case Comdat::Largest:
    case Comdat::SameSize:
      return false;
    }
  }

  if (G->hasSection()) {
    StringRef Section = G->getSection();

    // Globals from llvm.metadata aren't emitted, do not instrument them.
    if (Section == "llvm.metadata") return false;
    // Do not instrument globals from special LLVM sections.
    if (Section.find("__llvm") != StringRef::npos ||
        Section.find("__LLVM") != StringRef::npos)
      return false;

    // Do not instrument function pointers to initialization and termination
    // routines: dynamic linker will not properly handle redzones.
    if (Section.startswith(".preinit_array") ||
        Section.startswith(".init_array") ||
        Section.startswith(".fini_array")) {
      return false;
    }

    // On COFF, if the section name contains '$', it is highly likely that the
    // user is using section sorting to create an array of globals similar to
    // the way initialization callbacks are registered in .init_array and
    // .CRT$XCU. The ATL also registers things in .ATL$__[azm]. Adding redzones
    // to such globals is counterproductive, because the intent is that they
    // will form an array, and out-of-bounds accesses are expected.
    // See https://github.com/google/sanitizers/issues/305
    // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
    if (TargetTriple.isOSBinFormatCOFF() && Section.contains('$')) {
      LLVM_DEBUG(dbgs() << "Ignoring global in sorted section (contains '$'): "
                        << *G << "\n");
      return false;
    }

    if (TargetTriple.isOSBinFormatMachO()) {
      StringRef ParsedSegment, ParsedSection;
      unsigned TAA = 0, StubSize = 0;
      bool TAAParsed;
      std::string ErrorCode = MCSectionMachO::ParseSectionSpecifier(
          Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize);
      assert(ErrorCode.empty() && "Invalid section specifier.");

      // Ignore the globals from the __OBJC section. The ObjC runtime assumes
      // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to
      // them.
      if (ParsedSegment == "__OBJC" ||
          (ParsedSegment == "__DATA" && ParsedSection.startswith("__objc_"))) {
        LLVM_DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n");
        return false;
      }
      // See https://github.com/google/sanitizers/issues/32
      // Constant CFString instances are compiled in the following way:
      //  -- the string buffer is emitted into
      //     __TEXT,__cstring,cstring_literals
      //  -- the constant NSConstantString structure referencing that buffer
      //     is placed into __DATA,__cfstring
      // Therefore there's no point in placing redzones into __DATA,__cfstring.
      // Moreover, it causes the linker to crash on OS X 10.7
      if (ParsedSegment == "__DATA" && ParsedSection == "__cfstring") {
        LLVM_DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n");
        return false;
      }
      // The linker merges the contents of cstring_literals and removes the
      // trailing zeroes.
      if (ParsedSegment == "__TEXT" && (TAA & MachO::S_CSTRING_LITERALS)) {
        LLVM_DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n");
        return false;
      }
    }
  }

  return true;
}

// On Mach-O platforms, we emit global metadata in a separate section of the
// binary in order to allow the linker to properly dead strip. This is only
// supported on recent versions of ld64.
bool ModuleAddressSanitizer::ShouldUseMachOGlobalsSection() const {
  if (!TargetTriple.isOSBinFormatMachO())
    return false;

  if (TargetTriple.isMacOSX() && !TargetTriple.isMacOSXVersionLT(10, 11))
    return true;
  if (TargetTriple.isiOS() /* or tvOS */ && !TargetTriple.isOSVersionLT(9))
    return true;
  if (TargetTriple.isWatchOS() && !TargetTriple.isOSVersionLT(2))
    return true;

  return false;
}

StringRef ModuleAddressSanitizer::getGlobalMetadataSection() const {
  switch (TargetTriple.getObjectFormat()) {
  case Triple::COFF:  return ".ASAN$GL";
  case Triple::ELF:   return "asan_globals";
  case Triple::MachO: return "__DATA,__asan_globals,regular";
  case Triple::Wasm:
  case Triple::XCOFF:
    report_fatal_error(
        "ModuleAddressSanitizer not implemented for object file format.");
  case Triple::UnknownObjectFormat:
    break;
  }
  llvm_unreachable("unsupported object format");
}

void ModuleAddressSanitizer::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);

  // Declare our poisoning and unpoisoning functions.
  AsanPoisonGlobals =
      M.getOrInsertFunction(kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy);
  AsanUnpoisonGlobals =
      M.getOrInsertFunction(kAsanUnpoisonGlobalsName, IRB.getVoidTy());

  // Declare functions that register/unregister globals.
  AsanRegisterGlobals = M.getOrInsertFunction(
      kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);
  AsanUnregisterGlobals = M.getOrInsertFunction(
      kAsanUnregisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);

  // Declare the functions that find globals in a shared object and then invoke
  // the (un)register function on them.
  AsanRegisterImageGlobals = M.getOrInsertFunction(
      kAsanRegisterImageGlobalsName, IRB.getVoidTy(), IntptrTy);
  AsanUnregisterImageGlobals = M.getOrInsertFunction(
      kAsanUnregisterImageGlobalsName, IRB.getVoidTy(), IntptrTy);

  AsanRegisterElfGlobals =
      M.getOrInsertFunction(kAsanRegisterElfGlobalsName, IRB.getVoidTy(),
                            IntptrTy, IntptrTy, IntptrTy);
  AsanUnregisterElfGlobals =
      M.getOrInsertFunction(kAsanUnregisterElfGlobalsName, IRB.getVoidTy(),
                            IntptrTy, IntptrTy, IntptrTy);
}

// Put the metadata and the instrumented global in the same group. This ensures
// that the metadata is discarded if the instrumented global is discarded.
void ModuleAddressSanitizer::SetComdatForGlobalMetadata(
    GlobalVariable *G, GlobalVariable *Metadata, StringRef InternalSuffix) {
  Module &M = *G->getParent();
  Comdat *C = G->getComdat();
  if (!C) {
    if (!G->hasName()) {
      // If G is unnamed, it must be internal. Give it an artificial name
      // so we can put it in a comdat.
      assert(G->hasLocalLinkage());
      G->setName(Twine(kAsanGenPrefix) + "_anon_global");
    }

    if (!InternalSuffix.empty() && G->hasLocalLinkage()) {
      std::string Name = G->getName();
      Name += InternalSuffix;
      C = M.getOrInsertComdat(Name);
    } else {
      C = M.getOrInsertComdat(G->getName());
    }

    // Make this IMAGE_COMDAT_SELECT_NODUPLICATES on COFF. Also upgrade private
    // linkage to internal linkage so that a symbol table entry is emitted. This
    // is necessary in order to create the comdat group.
    if (TargetTriple.isOSBinFormatCOFF()) {
      C->setSelectionKind(Comdat::NoDuplicates);
      if (G->hasPrivateLinkage())
        G->setLinkage(GlobalValue::InternalLinkage);
    }
    G->setComdat(C);
  }

  assert(G->hasComdat());
  Metadata->setComdat(G->getComdat());
}

// Create a separate metadata global and put it in the appropriate ASan
// global registration section.
GlobalVariable *
ModuleAddressSanitizer::CreateMetadataGlobal(Module &M, Constant *Initializer,
                                             StringRef OriginalName) {
  auto Linkage = TargetTriple.isOSBinFormatMachO()
                     ? GlobalVariable::InternalLinkage
                     : GlobalVariable::PrivateLinkage;
  GlobalVariable *Metadata = new GlobalVariable(
      M, Initializer->getType(), false, Linkage, Initializer,
      Twine("__asan_global_") + GlobalValue::dropLLVMManglingEscape(OriginalName));
  Metadata->setSection(getGlobalMetadataSection());
  return Metadata;
}

IRBuilder<> ModuleAddressSanitizer::CreateAsanModuleDtor(Module &M) {
  AsanDtorFunction =
      Function::Create(FunctionType::get(Type::getVoidTy(*C), false),
                       GlobalValue::InternalLinkage, kAsanModuleDtorName, &M);
  BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction);

  return IRBuilder<>(ReturnInst::Create(*C, AsanDtorBB));
}

void ModuleAddressSanitizer::InstrumentGlobalsCOFF(
    IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
    ArrayRef<Constant *> MetadataInitializers) {
  assert(ExtendedGlobals.size() == MetadataInitializers.size());
  auto &DL = M.getDataLayout();

  for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
    Constant *Initializer = MetadataInitializers[i];
    GlobalVariable *G = ExtendedGlobals[i];
    GlobalVariable *Metadata =
        CreateMetadataGlobal(M, Initializer, G->getName());

    // The MSVC linker always inserts padding when linking incrementally. We
    // cope with that by aligning each struct to its size, which must be a power
    // of two.
    unsigned SizeOfGlobalStruct = DL.getTypeAllocSize(Initializer->getType());
    assert(isPowerOf2_32(SizeOfGlobalStruct) &&
           "global metadata will not be padded appropriately");
    Metadata->setAlignment(SizeOfGlobalStruct);

    SetComdatForGlobalMetadata(G, Metadata, "");
  }
}

void ModuleAddressSanitizer::InstrumentGlobalsELF(
    IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
    ArrayRef<Constant *> MetadataInitializers,
    const std::string &UniqueModuleId) {
  assert(ExtendedGlobals.size() == MetadataInitializers.size());

  SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
  for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
    GlobalVariable *G = ExtendedGlobals[i];
    GlobalVariable *Metadata =
        CreateMetadataGlobal(M, MetadataInitializers[i], G->getName());
    MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
    Metadata->setMetadata(LLVMContext::MD_associated, MD);
    MetadataGlobals[i] = Metadata;

    SetComdatForGlobalMetadata(G, Metadata, UniqueModuleId);
  }

  // Update llvm.compiler.used, adding the new metadata globals. This is
  // needed so that during LTO these variables stay alive.
  if (!MetadataGlobals.empty())
    appendToCompilerUsed(M, MetadataGlobals);

  // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
  // to look up the loaded image that contains it. Second, we can store in it
  // whether registration has already occurred, to prevent duplicate
  // registration.
  //
  // Common linkage ensures that there is only one global per shared library.
  GlobalVariable *RegisteredFlag = new GlobalVariable(
      M, IntptrTy, false, GlobalVariable::CommonLinkage,
      ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
  RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);

  // Create start and stop symbols.
  GlobalVariable *StartELFMetadata = new GlobalVariable(
      M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
      "__start_" + getGlobalMetadataSection());
  StartELFMetadata->setVisibility(GlobalVariable::HiddenVisibility);
  GlobalVariable *StopELFMetadata = new GlobalVariable(
      M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
      "__stop_" + getGlobalMetadataSection());
  StopELFMetadata->setVisibility(GlobalVariable::HiddenVisibility);

  // Create a call to register the globals with the runtime.
  IRB.CreateCall(AsanRegisterElfGlobals,
                 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
                  IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
                  IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});

  // We also need to unregister globals at the end, e.g., when a shared library
  // gets closed.
  IRBuilder<> IRB_Dtor = CreateAsanModuleDtor(M);
  IRB_Dtor.CreateCall(AsanUnregisterElfGlobals,
                      {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
                       IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
                       IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
}
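
// [Editor's note -- illustrative, not part of AddressSanitizer.cpp.] The
// __start_/__stop_ pair relies on a linker convention: for an ELF section
// whose name is a valid C identifier (here "asan_globals"), the linker
// synthesizes __start_<section> and __stop_<section> symbols bounding the
// final section, so the runtime can walk every metadata struct the linker
// kept. A hypothetical runtime-side loop (GlobalDesc is a made-up name for
// the metadata struct registered above):
//   extern "C" const GlobalDesc __start_asan_globals[], __stop_asan_globals[];
//   for (const GlobalDesc *g = __start_asan_globals;
//        g != __stop_asan_globals; ++g)
//     RegisterGlobal(g);
// Dead-stripped globals drop out together with their metadata because of the
// "associated" (!associated / SHF_LINK_ORDER-style) metadata set above.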

void ModuleAddressSanitizer::InstrumentGlobalsMachO(
    IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
    ArrayRef<Constant *> MetadataInitializers) {
  assert(ExtendedGlobals.size() == MetadataInitializers.size());

  // On recent Mach-O platforms, use a structure which binds the liveness of
  // the global variable to the metadata struct. Keep the list of "Liveness" GV
  // created to be added to llvm.compiler.used
  StructType *LivenessTy = StructType::get(IntptrTy, IntptrTy);
  SmallVector<GlobalValue *, 16> LivenessGlobals(ExtendedGlobals.size());

  for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
    Constant *Initializer = MetadataInitializers[i];
    GlobalVariable *G = ExtendedGlobals[i];
    GlobalVariable *Metadata =
        CreateMetadataGlobal(M, Initializer, G->getName());

    // On recent Mach-O platforms, we emit the global metadata in a way that
    // allows the linker to properly strip dead globals.
    auto LivenessBinder =
        ConstantStruct::get(LivenessTy, Initializer->getAggregateElement(0u),
                            ConstantExpr::getPointerCast(Metadata, IntptrTy));
    GlobalVariable *Liveness = new GlobalVariable(
        M, LivenessTy, false, GlobalVariable::InternalLinkage, LivenessBinder,
        Twine("__asan_binder_") + G->getName());
    Liveness->setSection("__DATA,__asan_liveness,regular,live_support");
    LivenessGlobals[i] = Liveness;
  }

  // Update llvm.compiler.used, adding the new liveness globals. This is
  // needed so that during LTO these variables stay alive. The alternative
  // would be to have the linker handle the LTO symbols, but libLTO's
  // current API does not expose access to the section for each symbol.
  if (!LivenessGlobals.empty())
    appendToCompilerUsed(M, LivenessGlobals);

  // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
  // to look up the loaded image that contains it. Second, we can store in it
  // whether registration has already occurred, to prevent duplicate
  // registration.
  //
  // Common linkage ensures that there is only one global per shared library.
  GlobalVariable *RegisteredFlag = new GlobalVariable(
      M, IntptrTy, false, GlobalVariable::CommonLinkage,
      ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
  RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);

  IRB.CreateCall(AsanRegisterImageGlobals,
                 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});

  // We also need to unregister globals at the end, e.g., when a shared library
  // gets closed.
  IRBuilder<> IRB_Dtor = CreateAsanModuleDtor(M);
  IRB_Dtor.CreateCall(AsanUnregisterImageGlobals,
                      {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
}

void ModuleAddressSanitizer::InstrumentGlobalsWithMetadataArray(
    IRBuilder<> &IRB, Module &M, ArrayRef<GlobalVariable *> ExtendedGlobals,
    ArrayRef<Constant *> MetadataInitializers) {
  assert(ExtendedGlobals.size() == MetadataInitializers.size());
  unsigned N = ExtendedGlobals.size();
  assert(N > 0);

  // On platforms that don't have a custom metadata section, we emit an array
  // of global metadata structures.
  ArrayType *ArrayOfGlobalStructTy =
      ArrayType::get(MetadataInitializers[0]->getType(), N);
  auto AllGlobals = new GlobalVariable(
      M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage,
      ConstantArray::get(ArrayOfGlobalStructTy, MetadataInitializers), "");
  if (Mapping.Scale > 3)
    AllGlobals->setAlignment(1ULL << Mapping.Scale);

  IRB.CreateCall(AsanRegisterGlobals,
                 {IRB.CreatePointerCast(AllGlobals, IntptrTy),
                  ConstantInt::get(IntptrTy, N)});

  // We also need to unregister globals at the end, e.g., when a shared library
  // gets closed.
  IRBuilder<> IRB_Dtor = CreateAsanModuleDtor(M);
  IRB_Dtor.CreateCall(AsanUnregisterGlobals,
                      {IRB.CreatePointerCast(AllGlobals, IntptrTy),
                       ConstantInt::get(IntptrTy, N)});
}
// This function replaces all global variables with new variables that have
2193
// trailing redzones. It also creates a function that poisons
2194
// redzones and inserts this function into llvm.global_ctors.
2195
// Sets *CtorComdat to true if the global registration code emitted into the
2196
// asan constructor is comdat-compatible.
2197
bool ModuleAddressSanitizer::InstrumentGlobals(IRBuilder<> &IRB, Module &M,
2198
167
                                               bool *CtorComdat) {
2199
167
  *CtorComdat = false;
2200
167
2201
167
  SmallVector<GlobalVariable *, 16> GlobalsToChange;
2202
167
2203
618
  for (auto &G : M.globals()) {
2204
618
    if (ShouldInstrumentGlobal(&G)) 
GlobalsToChange.push_back(&G)254
;
2205
618
  }
2206
167
2207
167
  size_t n = GlobalsToChange.size();
2208
167
  if (n == 0) {
2209
96
    *CtorComdat = true;
2210
96
    return false;
2211
96
  }
2212
71
2213
71
  auto &DL = M.getDataLayout();
2214
71
2215
71
  // A global is described by a structure
2216
71
  //   size_t beg;
2217
71
  //   size_t size;
2218
71
  //   size_t size_with_redzone;
2219
71
  //   const char *name;
2220
71
  //   const char *module_name;
2221
71
  //   size_t has_dynamic_init;
2222
71
  //   void *source_location;
2223
71
  //   size_t odr_indicator;
2224
71
  // We initialize an array of such structures and pass it to a run-time call.
2225
71
  StructType *GlobalStructTy =
2226
71
      StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy,
2227
71
                      IntptrTy, IntptrTy, IntptrTy);
2228
71
  SmallVector<GlobalVariable *, 16> NewGlobals(n);
2229
71
  SmallVector<Constant *, 16> Initializers(n);
2230
71
2231
71
  bool HasDynamicallyInitializedGlobals = false;
2232
71
2233
71
  // We shouldn't merge same module names, as this string serves as unique
2234
71
  // module ID in runtime.
2235
71
  GlobalVariable *ModuleName = createPrivateGlobalForString(
2236
71
      M, M.getModuleIdentifier(), /*AllowMerging*/ false, kAsanGenPrefix);
2237
71
2238
325
  for (size_t i = 0; i < n; 
i++254
) {
2239
254
    static const uint64_t kMaxGlobalRedzone = 1 << 18;
2240
254
    GlobalVariable *G = GlobalsToChange[i];
2241
254
2242
254
    // FIXME: Metadata should be attched directly to the global directly instead
2243
254
    // of being added to llvm.asan.globals.
2244
254
    auto MD = GlobalsMD.get(G);
2245
254
    StringRef NameForGlobal = G->getName();
2246
254
    // Create string holding the global name (use global name from metadata
2247
254
    // if it's available, otherwise just write the name of global variable).
2248
254
    GlobalVariable *Name = createPrivateGlobalForString(
2249
254
        M, MD.Name.empty() ? 
NameForGlobal108
:
MD.Name146
,
2250
254
        /*AllowMerging*/ true, kAsanGenPrefix);
2251
254
2252
254
    Type *Ty = G->getValueType();
2253
254
    uint64_t SizeInBytes = DL.getTypeAllocSize(Ty);
2254
254
    uint64_t MinRZ = MinRedzoneSizeForGlobal();
2255
254
    // MinRZ <= RZ <= kMaxGlobalRedzone
2256
254
    // and trying to make RZ to be ~ 1/4 of SizeInBytes.
2257
254
    uint64_t RZ = std::max(
2258
254
        MinRZ, std::min(kMaxGlobalRedzone, (SizeInBytes / MinRZ / 4) * MinRZ));
2259
254
    uint64_t RightRedzoneSize = RZ;
2260
254
    // Round up to MinRZ
2261
254
    if (SizeInBytes % MinRZ) 
RightRedzoneSize += MinRZ - (SizeInBytes % MinRZ)221
;
2262
254
    assert(((RightRedzoneSize + SizeInBytes) % MinRZ) == 0);
2263
254
    Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);
2264
254
2265
254
    StructType *NewTy = StructType::get(Ty, RightRedZoneTy);
2266
254
    Constant *NewInitializer = ConstantStruct::get(
2267
254
        NewTy, G->getInitializer(), Constant::getNullValue(RightRedZoneTy));
2268
254
2269
254
    // Create a new global variable with enough space for a redzone.
2270
254
    GlobalValue::LinkageTypes Linkage = G->getLinkage();
2271
254
    if (G->isConstant() && 
Linkage == GlobalValue::PrivateLinkage34
)
2272
24
      Linkage = GlobalValue::InternalLinkage;
2273
254
    GlobalVariable *NewGlobal =
2274
254
        new GlobalVariable(M, NewTy, G->isConstant(), Linkage, NewInitializer,
2275
254
                           "", G, G->getThreadLocalMode());
2276
254
    NewGlobal->copyAttributesFrom(G);
2277
254
    NewGlobal->setComdat(G->getComdat());
2278
254
    NewGlobal->setAlignment(MinRZ);
2279
254
    // Don't fold globals with redzones. ODR violation detector and redzone
2280
254
    // poisoning implicitly creates a dependence on the global's address, so it
2281
254
    // is no longer valid for it to be marked unnamed_addr.
2282
254
    NewGlobal->setUnnamedAddr(GlobalValue::UnnamedAddr::None);
2283
254
2284
254
    // Move null-terminated C strings to "__asan_cstring" section on Darwin.
2285
254
    if (TargetTriple.isOSBinFormatMachO() && 
!G->hasSection()73
&&
2286
254
        
G->isConstant()73
) {
2287
14
      auto Seq = dyn_cast<ConstantDataSequential>(G->getInitializer());
2288
14
      if (Seq && 
Seq->isCString()9
)
2289
7
        NewGlobal->setSection("__TEXT,__asan_cstring,regular");
2290
14
    }
2291
254
2292
254
    // Transfer the debug info.  The payload starts at offset zero so we can
2293
254
    // copy the debug info over as is.
2294
254
    SmallVector<DIGlobalVariableExpression *, 1> GVs;
2295
254
    G->getDebugInfo(GVs);
2296
254
    for (auto *GV : GVs)
2297
1
      NewGlobal->addDebugInfo(GV);
2298
254
2299
254
    Value *Indices2[2];
2300
254
    Indices2[0] = IRB.getInt32(0);
2301
254
    Indices2[1] = IRB.getInt32(0);
2302
254
2303
254
    G->replaceAllUsesWith(
2304
254
        ConstantExpr::getGetElementPtr(NewTy, NewGlobal, Indices2, true));
2305
254
    NewGlobal->takeName(G);
2306
254
    G->eraseFromParent();
2307
254
    NewGlobals[i] = NewGlobal;
2308
254
2309
254
    Constant *SourceLoc;
2310
254
    if (!MD.SourceLoc.empty()) {
2311
145
      auto SourceLocGlobal = createPrivateGlobalForSourceLoc(M, MD.SourceLoc);
2312
145
      SourceLoc = ConstantExpr::getPointerCast(SourceLocGlobal, IntptrTy);
2313
145
    } else {
2314
109
      SourceLoc = ConstantInt::get(IntptrTy, 0);
2315
109
    }
2316
254
2317
254
    Constant *ODRIndicator = ConstantExpr::getNullValue(IRB.getInt8PtrTy());
2318
254
    GlobalValue *InstrumentedGlobal = NewGlobal;
2319
254
2320
254
    bool CanUsePrivateAliases =
2321
254
        TargetTriple.isOSBinFormatELF() || 
TargetTriple.isOSBinFormatMachO()88
||
2322
254
        
TargetTriple.isOSBinFormatWasm()15
;
2323
254
    if (CanUsePrivateAliases && 
UsePrivateAlias239
) {
2324
10
      // Create local alias for NewGlobal to avoid crash on ODR between
2325
10
      // instrumented and non-instrumented libraries.
2326
10
      InstrumentedGlobal =
2327
10
          GlobalAlias::create(GlobalValue::PrivateLinkage, "", NewGlobal);
2328
10
    }
2329
254
2330
254
    // ODR should not happen for local linkage.
2331
254
    if (NewGlobal->hasLocalLinkage()) {
2332
55
      ODRIndicator = ConstantExpr::getIntToPtr(ConstantInt::get(IntptrTy, -1),
2333
55
                                               IRB.getInt8PtrTy());
2334
199
    } else if (UseOdrIndicator) {
2335
7
      // With local aliases, we need to provide another externally visible
2336
7
      // symbol __odr_asan_XXX to detect ODR violation.
2337
7
      auto *ODRIndicatorSym =
2338
7
          new GlobalVariable(M, IRB.getInt8Ty(), false, Linkage,
2339
7
                             Constant::getNullValue(IRB.getInt8Ty()),
2340
7
                             kODRGenPrefix + NameForGlobal, nullptr,
2341
7
                             NewGlobal->getThreadLocalMode());
2342
7
2343
7
      // Set meaningful attributes for indicator symbol.
2344
7
      ODRIndicatorSym->setVisibility(NewGlobal->getVisibility());
2345
7
      ODRIndicatorSym->setDLLStorageClass(NewGlobal->getDLLStorageClass());
2346
7
      ODRIndicatorSym->setAlignment(1);
2347
7
      ODRIndicator = ODRIndicatorSym;
2348
7
    }
2349
254
2350
254
    Constant *Initializer = ConstantStruct::get(
2351
254
        GlobalStructTy,
2352
254
        ConstantExpr::getPointerCast(InstrumentedGlobal, IntptrTy),
2353
254
        ConstantInt::get(IntptrTy, SizeInBytes),
2354
254
        ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
2355
254
        ConstantExpr::getPointerCast(Name, IntptrTy),
2356
254
        ConstantExpr::getPointerCast(ModuleName, IntptrTy),
2357
254
        ConstantInt::get(IntptrTy, MD.IsDynInit), SourceLoc,
2358
254
        ConstantExpr::getPointerCast(ODRIndicator, IntptrTy));
2359
254
2360
254
    if (ClInitializers && MD.IsDynInit) 
HasDynamicallyInitializedGlobals = true77
;
2361
254
2362
254
    LLVM_DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");
2363
254
2364
254
    Initializers[i] = Initializer;
2365
254
  }
2366
71
2367
71
  // Add instrumented globals to llvm.compiler.used list to avoid LTO from
2368
71
  // ConstantMerge'ing them.
2369
71
  SmallVector<GlobalValue *, 16> GlobalsToAddToUsedList;
2370
325
  for (size_t i = 0; i < n; 
i++254
) {
2371
254
    GlobalVariable *G = NewGlobals[i];
2372
254
    if (G->getName().empty()) 
continue2
;
2373
252
    GlobalsToAddToUsedList.push_back(G);
2374
252
  }
2375
71
  appendToCompilerUsed(M, ArrayRef<GlobalValue *>(GlobalsToAddToUsedList));

  std::string ELFUniqueModuleId =
      (UseGlobalsGC && TargetTriple.isOSBinFormatELF()) ? getUniqueModuleId(&M)
                                                        : "";

  if (!ELFUniqueModuleId.empty()) {
    InstrumentGlobalsELF(IRB, M, NewGlobals, Initializers, ELFUniqueModuleId);
    *CtorComdat = true;
  } else if (UseGlobalsGC && TargetTriple.isOSBinFormatCOFF()) {
    InstrumentGlobalsCOFF(IRB, M, NewGlobals, Initializers);
  } else if (UseGlobalsGC && ShouldUseMachOGlobalsSection()) {
    InstrumentGlobalsMachO(IRB, M, NewGlobals, Initializers);
  } else {
    InstrumentGlobalsWithMetadataArray(IRB, M, NewGlobals, Initializers);
  }
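
  // Roughly: on ELF with globals GC (-fsanitize-address-globals-dead-stripping)
  // the per-global metadata lands in a dedicated "asan_globals" section and is
  // registered via its __start_/__stop_ bounds; COFF and Mach-O use their own
  // section-based registration, and everything else falls back to a plain
  // metadata array handed to the runtime.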

  // Create calls for poisoning before initializers run and unpoisoning after.
  if (HasDynamicallyInitializedGlobals)
    createInitializerPoisonCalls(M, ModuleName);

  LLVM_DEBUG(dbgs() << M);
  return true;
}

int ModuleAddressSanitizer::GetAsanVersion(const Module &M) const {
  int LongSize = M.getDataLayout().getPointerSizeInBits();
  bool isAndroid = Triple(M.getTargetTriple()).isAndroid();
  int Version = 8;
  // 32-bit Android is one version ahead because of the switch to dynamic
  // shadow.
  Version += (LongSize == 32 && isAndroid);
  return Version;
}

bool ModuleAddressSanitizer::instrumentModule(Module &M) {
  initializeCallbacks(M);

  if (CompileKernel)
    return false;

  // Create a module constructor. A destructor is created lazily because not
  // all platforms, and not all modules, need it.
  std::string VersionCheckName =
      kAsanVersionCheckNamePrefix + std::to_string(GetAsanVersion(M));
  std::tie(AsanCtorFunction, std::ignore) = createSanitizerCtorAndInitFunctions(
      M, kAsanModuleCtorName, kAsanInitName, /*InitArgTypes=*/{},
      /*InitArgs=*/{}, VersionCheckName);
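
  // For a 64-bit target the constructor created above looks roughly like:
  //
  //   define internal void @asan.module_ctor() {
  //     call void @__asan_init()
  //     call void @__asan_version_mismatch_check_v8()
  //     ret void
  //   }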

  bool CtorComdat = true;
  bool Changed = false;
  // TODO(glider): temporarily disabled globals instrumentation for KASan.
  if (ClGlobals) {
    IRBuilder<> IRB(AsanCtorFunction->getEntryBlock().getTerminator());
    Changed |= InstrumentGlobals(IRB, M, &CtorComdat);
  }

  // Put the constructor and destructor in comdat if both
  // (1) global instrumentation is not TU-specific
  // (2) target is ELF.
  if (UseCtorComdat && TargetTriple.isOSBinFormatELF() && CtorComdat) {
    AsanCtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleCtorName));
    appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndDtorPriority,
                        AsanCtorFunction);
    if (AsanDtorFunction) {
      AsanDtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleDtorName));
      appendToGlobalDtors(M, AsanDtorFunction, kAsanCtorAndDtorPriority,
                          AsanDtorFunction);
    }
  } else {
    appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndDtorPriority);
    if (AsanDtorFunction)
      appendToGlobalDtors(M, AsanDtorFunction, kAsanCtorAndDtorPriority);
  }

  return Changed;
}

void AddressSanitizer::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  // Create __asan_report* callbacks.
  // IsWrite, TypeSize and Exp are encoded in the function name.
  for (int Exp = 0; Exp < 2; Exp++) {
    for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
      const std::string TypeStr = AccessIsWrite ? "store" : "load";
      const std::string ExpStr = Exp ? "exp_" : "";
      const std::string EndingStr = Recover ? "_noabort" : "";

      SmallVector<Type *, 3> Args2 = {IntptrTy, IntptrTy};
      SmallVector<Type *, 2> Args1{1, IntptrTy};
      if (Exp) {
        Type *ExpType = Type::getInt32Ty(*C);
        Args2.push_back(ExpType);
        Args1.push_back(ExpType);
      }
      AsanErrorCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
          kAsanReportErrorTemplate + ExpStr + TypeStr + "_n" + EndingStr,
          FunctionType::get(IRB.getVoidTy(), Args2, false));

      AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
          ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N" + EndingStr,
          FunctionType::get(IRB.getVoidTy(), Args2, false));

      for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
           AccessSizeIndex++) {
        const std::string Suffix = TypeStr + itostr(1ULL << AccessSizeIndex);
        AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] =
            M.getOrInsertFunction(
                kAsanReportErrorTemplate + ExpStr + Suffix + EndingStr,
                FunctionType::get(IRB.getVoidTy(), Args1, false));

        AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] =
            M.getOrInsertFunction(
                ClMemoryAccessCallbackPrefix + ExpStr + Suffix + EndingStr,
                FunctionType::get(IRB.getVoidTy(), Args1, false));
      }
    }
  }
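
  // With the default prefixes this resolves to names such as
  // __asan_report_load4, __asan_report_exp_store8, __asan_load1 and
  // __asan_storeN, plus _noabort variants when Recover is set.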

  const std::string MemIntrinCallbackPrefix =
      CompileKernel ? std::string("") : ClMemoryAccessCallbackPrefix;
  AsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove",
                                      IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                                      IRB.getInt8PtrTy(), IntptrTy);
  AsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy",
                                     IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                                     IRB.getInt8PtrTy(), IntptrTy);
  AsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset",
                                     IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                                     IRB.getInt32Ty(), IntptrTy);

  AsanHandleNoReturnFunc =
      M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy());

  AsanPtrCmpFunction =
      M.getOrInsertFunction(kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy);
  AsanPtrSubFunction =
      M.getOrInsertFunction(kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy);
  // We insert an empty inline asm after __asan_report* to avoid callback merge.
  EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
                            StringRef(""), StringRef(""),
                            /*hasSideEffects=*/true);
  if (Mapping.InGlobal)
    AsanShadowGlobal = M.getOrInsertGlobal("__asan_shadow",
                                           ArrayType::get(IRB.getInt8Ty(), 0));
}

bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
  // For each NSObject descendant having a +load method, this method is invoked
  // by the ObjC runtime before any of the static constructors is called.
  // Therefore we need to instrument such methods with a call to __asan_init
  // at the beginning in order to initialize our runtime before any access to
  // the shadow memory.
  // We cannot just ignore these methods, because they may call other
  // instrumented functions.
  if (F.getName().find(" load]") != std::string::npos) {
    FunctionCallee AsanInitFunction =
        declareSanitizerInitFunction(*F.getParent(), kAsanInitName, {});
    IRBuilder<> IRB(&F.front(), F.front().begin());
    IRB.CreateCall(AsanInitFunction, {});
    return true;
  }
  return false;
}
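
// The check above relies on how clang names ObjC method functions, e.g.
// "+[MyClass load]", so only the +load family of methods is affected.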

void AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(Function &F) {
  // Generate code only when dynamic addressing is needed.
  if (Mapping.Offset != kDynamicShadowSentinel)
    return;

  IRBuilder<> IRB(&F.front().front());
  if (Mapping.InGlobal) {
    if (ClWithIfuncSuppressRemat) {
      // An empty inline asm with input reg == output reg.
      // An opaque pointer-to-int cast, basically.
      InlineAsm *Asm = InlineAsm::get(
          FunctionType::get(IntptrTy, {AsanShadowGlobal->getType()}, false),
          StringRef(""), StringRef("=r,0"),
          /*hasSideEffects=*/false);
      LocalDynamicShadow =
          IRB.CreateCall(Asm, {AsanShadowGlobal}, ".asan.shadow");
    } else {
      LocalDynamicShadow =
          IRB.CreatePointerCast(AsanShadowGlobal, IntptrTy, ".asan.shadow");
    }
  } else {
    Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal(
        kAsanShadowMemoryDynamicAddress, IntptrTy);
    LocalDynamicShadow = IRB.CreateLoad(IntptrTy, GlobalDynamicAddress);
  }
}
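
// With a dynamic shadow base, memToShadow later computes roughly
//   Shadow = (Addr >> Mapping.Scale) + LocalDynamicShadow
// instead of adding a constant Mapping.Offset, so the value produced above is
// materialized once per instrumented function.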

void AddressSanitizer::markEscapedLocalAllocas(Function &F) {
  // Find the one possible call to llvm.localescape and pre-mark allocas passed
  // to it as uninteresting. This assumes we haven't started processing allocas
  // yet. This check is done up front because iterating the use list in
  // isInterestingAlloca would be algorithmically slower.
  assert(ProcessedAllocas.empty() && "must process localescape before allocas");

  // Try to get the declaration of llvm.localescape. If it's not in the module,
  // we can exit early.
  if (!F.getParent()->getFunction("llvm.localescape")) return;

  // Look for a call to llvm.localescape in the entry block. It can't be in
  // any other block.
  for (Instruction &I : F.getEntryBlock()) {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
    if (II && II->getIntrinsicID() == Intrinsic::localescape) {
      // We found a call. Mark all the allocas passed in as uninteresting.
      for (Value *Arg : II->arg_operands()) {
        AllocaInst *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
        assert(AI && AI->isStaticAlloca() &&
               "non-static alloca arg to localescape");
        ProcessedAllocas[AI] = false;
      }
      break;
    }
  }
}
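
// For reference, such a call looks roughly like
//   call void (...) @llvm.localescape(i8* %buf, i32* %state)
// (used for Windows SEH); the named allocas are later located via
// llvm.localrecover, so their frame layout must stay untouched.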

bool AddressSanitizer::instrumentFunction(Function &F,
                                          const TargetLibraryInfo *TLI) {
  if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false;
  if (!ClDebugFunc.empty() && ClDebugFunc == F.getName()) return false;
  if (F.getName().startswith("__asan_")) return false;

  bool FunctionModified = false;

  // If needed, insert __asan_init before checking for SanitizeAddress attr.
  // This function needs to be called even if the function body is not
  // instrumented.
  if (maybeInsertAsanInitAtFunctionEntry(F))
    FunctionModified = true;

  // Leave if the function doesn't need instrumentation.
  if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return FunctionModified;

  LLVM_DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");

  initializeCallbacks(*F.getParent());

  FunctionStateRAII CleanupObj(this);

  maybeInsertDynamicShadowAtFunctionEntry(F);

  // We can't instrument allocas used with llvm.localescape. Only static
  // allocas can be passed to that intrinsic.
  markEscapedLocalAllocas(F);

  // We want to instrument every address only once per basic block (unless
  // there are calls between uses).
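  // E.g., two loads of *p in one block produce a single check of p, but a
  // call between them clears TempsToInstrument (the callee may free p), so
  // the next use of p is checked again.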
  SmallPtrSet<Value *, 16> TempsToInstrument;
  SmallVector<Instruction *, 16> ToInstrument;
  SmallVector<Instruction *, 8> NoReturnCalls;
  SmallVector<BasicBlock *, 16> AllBlocks;
  SmallVector<Instruction *, 16> PointerComparisonsOrSubtracts;
  int NumAllocas = 0;
  bool IsWrite;
  unsigned Alignment;
  uint64_t TypeSize;

  // Fill the set of memory operations to instrument.
  for (auto &BB : F) {
    AllBlocks.push_back(&BB);
    TempsToInstrument.clear();
    int NumInsnsPerBB = 0;
    for (auto &Inst : BB) {
      if (LooksLikeCodeInBug11395(&Inst)) return false;
      Value *MaybeMask = nullptr;
      if (Value *Addr = isInterestingMemoryAccess(&Inst, &IsWrite, &TypeSize,
                                                  &Alignment, &MaybeMask)) {
        if (ClOpt && ClOptSameTemp) {
          // If we have a mask, skip instrumentation if we've already
          // instrumented the full object. But don't add to TempsToInstrument
          // because we might get another load/store with a different mask.
          if (MaybeMask) {
            if (TempsToInstrument.count(Addr))
              continue; // We've seen this (whole) temp in the current BB.
          } else {
            if (!TempsToInstrument.insert(Addr).second)
              continue; // We've seen this temp in the current BB.
          }
        }
      } else if (((ClInvalidPointerPairs || ClInvalidPointerCmp) &&
                  isInterestingPointerComparison(&Inst)) ||
                 ((ClInvalidPointerPairs || ClInvalidPointerSub) &&
                  isInterestingPointerSubtraction(&Inst))) {
        PointerComparisonsOrSubtracts.push_back(&Inst);
        continue;
      } else if (isa<MemIntrinsic>(Inst)) {
        // ok, take it.
      } else {
        if (isa<AllocaInst>(Inst)) NumAllocas++;
        CallSite CS(&Inst);
        if (CS) {
          // A call inside BB.
          TempsToInstrument.clear();
          if (CS.doesNotReturn() && !CS->getMetadata("nosanitize"))
            NoReturnCalls.push_back(CS.getInstruction());
        }
        if (CallInst *CI = dyn_cast<CallInst>(&Inst))
          maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI);
        continue;
      }
      ToInstrument.push_back(&Inst);
      NumInsnsPerBB++;
      if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break;
    }
  }

  bool UseCalls =
      (ClInstrumentationWithCallsThreshold >= 0 &&
       ToInstrument.size() > (unsigned)ClInstrumentationWithCallsThreshold);
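  // Above the threshold each check is emitted as an outlined call such as
  // __asan_load4(Addr) instead of an inline shadow test, trading a little
  // speed for much smaller code in check-heavy functions.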
  const DataLayout &DL = F.getParent()->getDataLayout();
  ObjectSizeOpts ObjSizeOpts;
  ObjSizeOpts.RoundToAlign = true;
  ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext(), ObjSizeOpts);

  // Instrument.
  int NumInstrumented = 0;
  for (auto Inst : ToInstrument) {
    if (ClDebugMin < 0 || ClDebugMax < 0 ||
        (NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
      if (isInterestingMemoryAccess(Inst, &IsWrite, &TypeSize, &Alignment))
        instrumentMop(ObjSizeVis, Inst, UseCalls,
                      F.getParent()->getDataLayout());
      else
        instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
    }
    NumInstrumented++;
  }

  FunctionStackPoisoner FSP(F, *this);
  bool ChangedStack = FSP.runOnFunction();

  // We must unpoison the stack before NoReturn calls (throw, _exit, etc).
  // See e.g. https://github.com/google/sanitizers/issues/37
  for (auto CI : NoReturnCalls) {
    IRBuilder<> IRB(CI);
    IRB.CreateCall(AsanHandleNoReturnFunc, {});
  }

  for (auto Inst : PointerComparisonsOrSubtracts) {
    instrumentPointerComparisonOrSubtraction(Inst);
    NumInstrumented++;
  }

  if (NumInstrumented > 0 || ChangedStack || !NoReturnCalls.empty())
    FunctionModified = true;

  LLVM_DEBUG(dbgs() << "ASAN done instrumenting: " << FunctionModified << " "
                    << F << "\n");

  return FunctionModified;
}

// Workaround for bug 11395: we don't want to instrument stack in functions
// with large assembly blobs (32-bit only), otherwise reg alloc may crash.
// FIXME: remove once bug 11395 is fixed.
bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
  if (LongSize != 32) return false;
  CallInst *CI = dyn_cast<CallInst>(I);
  if (!CI || !CI->isInlineAsm()) return false;
  if (CI->getNumArgOperands() <= 5) return false;
  // We have inline assembly with quite a few arguments.
  return true;
}

void FunctionStackPoisoner::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  for (int i = 0; i <= kMaxAsanStackMallocSizeClass; i++) {
    std::string Suffix = itostr(i);
    AsanStackMallocFunc[i] = M.getOrInsertFunction(
        kAsanStackMallocNameTemplate + Suffix, IntptrTy, IntptrTy);
    AsanStackFreeFunc[i] =
        M.getOrInsertFunction(kAsanStackFreeNameTemplate + Suffix,
                              IRB.getVoidTy(), IntptrTy, IntptrTy);
  }
  if (ASan.UseAfterScope) {
    AsanPoisonStackMemoryFunc = M.getOrInsertFunction(
        kAsanPoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
    AsanUnpoisonStackMemoryFunc = M.getOrInsertFunction(
        kAsanUnpoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
  }

  for (size_t Val : {0x00, 0xf1, 0xf2, 0xf3, 0xf5, 0xf8}) {
    std::ostringstream Name;
    Name << kAsanSetShadowPrefix;
    Name << std::setw(2) << std::setfill('0') << std::hex << Val;
    AsanSetShadowFunc[Val] =
        M.getOrInsertFunction(Name.str(), IRB.getVoidTy(), IntptrTy, IntptrTy);
  }

  AsanAllocaPoisonFunc = M.getOrInsertFunction(
      kAsanAllocaPoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
  AsanAllocasUnpoisonFunc = M.getOrInsertFunction(
      kAsanAllocasUnpoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
}
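
// The values requested above are the standard ASan stack shadow markers
// (0xf1 left redzone, 0xf2 middle, 0xf3 right, 0xf5 after-return, 0xf8
// after-scope, 0x00 addressable), so the resolved names look like
// __asan_set_shadow_f8; the size-class callbacks resolve to
// __asan_stack_malloc_0 .. __asan_stack_malloc_10 and the matching
// __asan_stack_free_* functions.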

void FunctionStackPoisoner::copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
                                               ArrayRef<uint8_t> ShadowBytes,
                                               size_t Begin, size_t End,
                                               IRBuilder<> &IRB,
                                               Value *ShadowBase) {
  if (Begin >= End)
    return;

  const size_t LargestStoreSizeInBytes =
      std::min<size_t>(sizeof(uint64_t), ASan.LongSize / 8);

  const bool IsLittleEndian = F.getParent()->getDataLayout().isLittleEndian();

  // Poison the given range in shadow using the largest store size, without
  // leading and trailing zeros in ShadowMask. Zeros never change, so they
  // need neither poisoning nor up-poisoning. Still, we don't mind if some of
  // them get into the middle of a store.
  for (size_t i = Begin; i < End;) {
    if (!ShadowMask[i]) {
      assert(!ShadowBytes[i]);
      ++i;
      continue;
    }

    size_t StoreSizeInBytes = LargestStoreSizeInBytes;
    // Fit store size into the range.
    while (StoreSizeInBytes > End - i)
      StoreSizeInBytes /= 2;

    // Minimize store size by trimming trailing zeros.
    for (size_t j = StoreSizeInBytes - 1; j && !ShadowMask[i + j]; --j) {
      while (j <= StoreSizeInBytes / 2)
        StoreSizeInBytes /= 2;
    }

    uint64_t Val = 0;
    for (size_t j = 0; j < StoreSizeInBytes; j++) {
      if (IsLittleEndian)
        Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
      else
        Val = (Val << 8) | ShadowBytes[i + j];
    }

    Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
    Value *Poison = IRB.getIntN(StoreSizeInBytes * 8, Val);
    IRB.CreateAlignedStore(
        Poison, IRB.CreateIntToPtr(Ptr, Poison->getType()->getPointerTo()), 1);

    i += StoreSizeInBytes;
  }
}
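
// For example, on a little-endian 64-bit target a run of four 0xf1 shadow
// bytes at offset 0 becomes a single 4-byte store of 0xf1f1f1f1 to
// ShadowBase rather than four 1-byte stores.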

void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
                                         ArrayRef<uint8_t> ShadowBytes,
                                         IRBuilder<> &IRB, Value *ShadowBase) {
  copyToShadow(ShadowMask, ShadowBytes, 0, ShadowMask.size(), IRB, ShadowBase);
}

void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
                                         ArrayRef<uint8_t> ShadowBytes,
                                         size_t Begin, size_t End,
                                         IRBuilder<> &IRB, Value *ShadowBase) {
  assert(ShadowMask.size() == ShadowBytes.size());
  size_t Done = Begin;
  for (size_t i = Begin, j = Begin + 1; i < End; i = j++) {
    if (!ShadowMask[i]) {
      assert(!ShadowBytes[i]);
      continue;
    }
    uint8_t Val = ShadowBytes[i];
    if (!AsanSetShadowFunc[Val])
      continue;

    // Skip same values.
    for (; j < End && ShadowMask[j] && Val == ShadowBytes[j]; ++j) {
    }

    if (j - i >= ClMaxInlinePoisoningSize) {
      copyToShadowInline(ShadowMask, ShadowBytes, Done, i, IRB, ShadowBase);
      IRB.CreateCall(AsanSetShadowFunc[Val],
                     {IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
                      ConstantInt::get(IntptrTy, j - i)});
      Done = j;
    }
  }

  copyToShadowInline(ShadowMask, ShadowBytes, Done, End, IRB, ShadowBase);
}

// Fake stack allocator (asan_fake_stack.h) has 11 size classes
// for every power of 2 from kMinStackMallocSize to kMaxAsanStackMallocSizeClass
static int StackMallocSizeClass(uint64_t LocalStackSize) {
  assert(LocalStackSize <= kMaxStackMallocSize);
  uint64_t MaxSize = kMinStackMallocSize;
  for (int i = 0;; i++, MaxSize *= 2)
    if (LocalStackSize <= MaxSize) return i;
  llvm_unreachable("impossible LocalStackSize");
}
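
// With kMinStackMallocSize = 64 this maps, e.g., a 64-byte frame to class 0,
// a 96-byte frame to class 1 (<= 128), and a 4096-byte frame to class 6.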

void FunctionStackPoisoner::copyArgsPassedByValToAllocas() {
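  // Byval arguments live in caller-owned stack memory that cannot be wrapped
  // in redzones, so each one is copied into a fresh alloca here and the copy
  // is instrumented like any other local variable.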
  Instruction *CopyInsertPoint = &F.front().front();
  if (CopyInsertPoint == ASan.LocalDynamicShadow) {
    // Insert after the dynamic shadow location is determined.
    CopyInsertPoint = CopyInsertPoint->getNextNode();
    assert(CopyInsertPoint);
  }
  IRBuilder<> IRB(CopyInsertPoint);
  const DataLayout &DL = F.getParent()->getDataLayout();
  for (Argument &Arg : F.args()) {
    if (Arg.hasByValAttr()) {
      Type *Ty = Arg.getType()->getPointerElementType();
      unsigned Align = Arg.getParamAlignment();
      if (Align == 0) Align = DL.getABITypeAlignment(Ty);

      AllocaInst *AI = IRB.CreateAlloca(
          Ty, nullptr,
          (Arg.hasName() ? Arg.getName() : "Arg" + Twine(Arg.getArgNo())) +
              ".byval");
      AI->setAlignment(Align);
      Arg.replaceAllUsesWith(AI);

      uint64_t AllocSize = DL.getTypeAllocSize(Ty);
      IRB.CreateMemCpy(AI, Align, &Arg, Align, AllocSize);
    }
  }
}

PHINode *FunctionStackPoisoner::createPHI(IRBuilder<> &IRB, Value *Cond,
                                          Value *ValueIfTrue,
                                          Instruction *ThenTerm,
                                          Value *ValueIfFalse) {
  PHINode *PHI = IRB.CreatePHI(IntptrTy, 2);
  BasicBlock *CondBlock = cast<Instruction>(Cond)->getParent();
  PHI->addIncoming(ValueIfFalse, CondBlock);
  BasicBlock *ThenBlock = ThenTerm->getParent();
  PHI->addIncoming(ValueIfTrue, ThenBlock);
  return PHI;
}

Value *FunctionStackPoisoner::createAllocaForLayout(
    IRBuilder<> &IRB, const ASanStackFrameLayout &L, bool Dynamic) {
  AllocaInst *Alloca;
  if (Dynamic) {
    Alloca = IRB.CreateAlloca(IRB.getInt8Ty(),
                              ConstantInt::get(IRB.getInt64Ty(), L.FrameSize),
                              "MyAlloca");
  } else {
    Alloca = IRB.CreateAlloca(ArrayType::get(IRB.getInt8Ty(), L.FrameSize),
                              nullptr, "MyAlloca");
    assert(Alloca->isStaticAlloca());
  }
  assert((ClRealignStack & (ClRealignStack - 1)) == 0);
  size_t FrameAlignment = std::max(L.FrameAlignment, (size_t)ClRealignStack);
  Alloca->setAlignment(FrameAlignment);
  return IRB.CreatePointerCast(Alloca, IntptrTy);
}

void FunctionStackPoisoner::createDynamicAllocasInitStorage() {
  BasicBlock &FirstBB = *F.begin();
  IRBuilder<> IRB(dyn_cast<Instruction>(FirstBB.begin()));
  DynamicAllocaLayout = IRB.CreateAlloca(IntptrTy, nullptr);
  IRB.CreateStore(Constant::getNullValue(IntptrTy), DynamicAllocaLayout);
  DynamicAllocaLayout->setAlignment(32);
}

void FunctionStackPoisoner::processDynamicAllocas() {
  if (!ClInstrumentDynamicAllocas || DynamicAllocaVec.empty()) {
    assert(DynamicAllocaPoisonCallVec.empty());
    return;
  }

  // Insert poison calls for lifetime intrinsics for dynamic allocas.
  for (const auto &APC : DynamicAllocaPoisonCallVec) {
    assert(APC.InsBefore);
    assert(APC.AI);
    assert(ASan.isInterestingAlloca(*APC.AI));
    assert(!APC.AI->isStaticAlloca());

    IRBuilder<> IRB(APC.InsBefore);
    poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
    // Dynamic allocas will be unpoisoned unconditionally below in
    // unpoisonDynamicAllocas.
    // Flag that we need to unpoison static allocas.
  }

  // Handle dynamic allocas.
  createDynamicAllocasInitStorage();
  for (auto &AI : DynamicAllocaVec)
    handleDynamicAllocaCall(AI);
  unpoisonDynamicAllocas();
}

void FunctionStackPoisoner::processStaticAllocas() {
  if (AllocaVec.empty()) {
    assert(StaticAllocaPoisonCallVec.empty());
    return;
  }

  int StackMallocIdx = -1;
  DebugLoc EntryDebugLocation;
  if (auto SP = F.getSubprogram())
    EntryDebugLocation = DebugLoc::get(SP->getScopeLine(), 0, SP);

  Instruction *InsBefore = AllocaVec[0];
  IRBuilder<> IRB(InsBefore);
  IRB.SetCurrentDebugLocation(EntryDebugLocation);

  // Make sure non-instrumented allocas stay in the entry block. Otherwise,
  // debug info is broken, because only entry-block allocas are treated as
  // regular stack slots.
  auto InsBeforeB = InsBefore->getParent();
  assert(InsBeforeB == &F.getEntryBlock());
  for (auto *AI : StaticAllocasToMoveUp)
    if (AI->getParent() == InsBeforeB)
      AI->moveBefore(InsBefore);

  // If we have a call to llvm.localescape, keep it in the entry block.
  if (LocalEscapeCall) LocalEscapeCall->moveBefore(InsBefore);

  SmallVector<ASanStackVariableDescription, 16> SVD;
  SVD.reserve(AllocaVec.size());
  for (AllocaInst *AI : AllocaVec) {
    ASanStackVariableDescription D = {AI->getName().data(),
                                      ASan.getAllocaSizeInBytes(*AI),
                                      0,
                                      AI->getAlignment(),
                                      AI,
                                      0,
                                      0};
    SVD.push_back(D);
  }

  // Minimal header size (left redzone) is 4 pointers,
  // i.e. 32 bytes on 64-bit platforms and 16 bytes on 32-bit platforms.
  size_t Granularity = 1ULL << Mapping.Scale;
  size_t MinHeaderSize = std::max((size_t)ASan.LongSize / 2, Granularity);
  const ASanStackFrameLayout &L =
      ComputeASanStackFrameLayout(SVD, Granularity, MinHeaderSize);
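
  // The resulting frame is laid out roughly as
  //   [left rz: magic, frame description, PC][var0][rz][var1][rz]...[right rz]
  // with each variable aligned to the shadow granularity; the header words
  // are stored into the left redzone further below.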

  // Build AllocaToSVDMap for ASanStackVariableDescription lookup.
  DenseMap<const AllocaInst *, ASanStackVariableDescription *> AllocaToSVDMap;
  for (auto &Desc : SVD)
    AllocaToSVDMap[Desc.AI] = &Desc;

  // Update SVD with information from lifetime intrinsics.
  for (const auto &APC : StaticAllocaPoisonCallVec) {
    assert(APC.InsBefore);
    assert(APC.AI);
    assert(ASan.isInterestingAlloca(*APC.AI));
    assert(APC.AI->isStaticAlloca());

    ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
    Desc.LifetimeSize = Desc.Size;
    if (const DILocation *FnLoc = EntryDebugLocation.get()) {
      if (const DILocation *LifetimeLoc = APC.InsBefore->getDebugLoc().get()) {
        if (LifetimeLoc->getFile() == FnLoc->getFile())
          if (unsigned Line = LifetimeLoc->getLine())
            Desc.Line = std::min(Desc.Line ? Desc.Line : Line, Line);
      }
    }
  }

  auto DescriptionString = ComputeASanStackFrameDescription(SVD);
  LLVM_DEBUG(dbgs() << DescriptionString << " --- " << L.FrameSize << "\n");
  uint64_t LocalStackSize = L.FrameSize;
  bool DoStackMalloc = ClUseAfterReturn && !ASan.CompileKernel &&
                       LocalStackSize <= kMaxStackMallocSize;
  bool DoDynamicAlloca = ClDynamicAllocaStack;
  // Don't do dynamic alloca or stack malloc if:
  // 1) There is inline asm: too often it makes assumptions on which registers
  //    are available.
  // 2) There is a returns_twice call (typically setjmp), which is
  //    optimization-hostile, and doesn't play well with introduced indirect
  //    register-relative calculation of local variable addresses.
  DoDynamicAlloca &= !HasNonEmptyInlineAsm && !HasReturnsTwiceCall;
  DoStackMalloc &= !HasNonEmptyInlineAsm && !HasReturnsTwiceCall;

  Value *StaticAlloca =
      DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);

  Value *FakeStack;
  Value *LocalStackBase;
  Value *LocalStackBaseAlloca;
  uint8_t DIExprFlags = DIExpression::ApplyOffset;

  if (DoStackMalloc) {
    LocalStackBaseAlloca =
        IRB.CreateAlloca(IntptrTy, nullptr, "asan_local_stack_base");
    // void *FakeStack = __asan_option_detect_stack_use_after_return
    //     ? __asan_stack_malloc_N(LocalStackSize)
    //     : nullptr;
    // void *LocalStackBase = (FakeStack) ? FakeStack : alloca(LocalStackSize);
    Constant *OptionDetectUseAfterReturn = F.getParent()->getOrInsertGlobal(
        kAsanOptionDetectUseAfterReturn, IRB.getInt32Ty());
    Value *UseAfterReturnIsEnabled = IRB.CreateICmpNE(
        IRB.CreateLoad(IRB.getInt32Ty(), OptionDetectUseAfterReturn),
        Constant::getNullValue(IRB.getInt32Ty()));
    Instruction *Term =
        SplitBlockAndInsertIfThen(UseAfterReturnIsEnabled, InsBefore, false);
    IRBuilder<> IRBIf(Term);
    IRBIf.SetCurrentDebugLocation(EntryDebugLocation);
    StackMallocIdx = StackMallocSizeClass(LocalStackSize);
    assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
    Value *FakeStackValue =
        IRBIf.CreateCall(AsanStackMallocFunc[StackMallocIdx],
                         ConstantInt::get(IntptrTy, LocalStackSize));
    IRB.SetInsertPoint(InsBefore);
    IRB.SetCurrentDebugLocation(EntryDebugLocation);
    FakeStack = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue, Term,
                          ConstantInt::get(IntptrTy, 0));

    Value *NoFakeStack =
        IRB.CreateICmpEQ(FakeStack, Constant::getNullValue(IntptrTy));
    Term = SplitBlockAndInsertIfThen(NoFakeStack, InsBefore, false);
    IRBIf.SetInsertPoint(Term);
    IRBIf.SetCurrentDebugLocation(EntryDebugLocation);
    Value *AllocaValue =
        DoDynamicAlloca ? createAllocaForLayout(IRBIf, L, true) : StaticAlloca;

    IRB.SetInsertPoint(InsBefore);
    IRB.SetCurrentDebugLocation(EntryDebugLocation);
    LocalStackBase = createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStack);
    IRB.SetCurrentDebugLocation(EntryDebugLocation);
    IRB.CreateStore(LocalStackBase, LocalStackBaseAlloca);
    DIExprFlags |= DIExpression::DerefBefore;
  } else {
    // void *FakeStack = nullptr;
    // void *LocalStackBase = alloca(LocalStackSize);
    FakeStack = ConstantInt::get(IntptrTy, 0);
    LocalStackBase =
        DoDynamicAlloca ? createAllocaForLayout(IRB, L, true) : StaticAlloca;
    LocalStackBaseAlloca = LocalStackBase;
  }

  // Replace Alloca instructions with base+offset.
  for (const auto &Desc : SVD) {
    AllocaInst *AI = Desc.AI;
    replaceDbgDeclareForAlloca(AI, LocalStackBaseAlloca, DIB, DIExprFlags,
                               Desc.Offset);
    Value *NewAllocaPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
        AI->getType());
    AI->replaceAllUsesWith(NewAllocaPtr);
  }

  // The left-most redzone has enough space for at least 4 pointers.
  // Write the Magic value to redzone[0].
  Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy);
  IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic),
                  BasePlus0);
  // Write the frame description constant to redzone[1].
  Value *BasePlus1 = IRB.CreateIntToPtr(
      IRB.CreateAdd(LocalStackBase,
                    ConstantInt::get(IntptrTy, ASan.LongSize / 8)),
      IntptrPtrTy);
  GlobalVariable *StackDescriptionGlobal =
      createPrivateGlobalForString(*F.getParent(), DescriptionString,
                                   /*AllowMerging*/ true, kAsanGenPrefix);
  Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy);
  IRB.CreateStore(Description, BasePlus1);
  // Write the PC to redzone[2].
  Value *BasePlus2 = IRB.CreateIntToPtr(
      IRB.CreateAdd(LocalStackBase,
                    ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)),
      IntptrPtrTy);
  IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);

  const auto &ShadowAfterScope = GetShadowBytesAfterScope(SVD, L);

  // Poison the stack red zones at the entry.
  Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
  // As mask we must use the most poisoned case: red zones and after scope.
  // As bytes we can use either the same or just red zones only.
  copyToShadow(ShadowAfterScope, ShadowAfterScope, IRB, ShadowBase);

  if (!StaticAllocaPoisonCallVec.empty()) {
    const auto &ShadowInScope = GetShadowBytes(SVD, L);

    // Poison static allocas near lifetime intrinsics.
    for (const auto &APC : StaticAllocaPoisonCallVec) {
      const ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
      assert(Desc.Offset % L.Granularity == 0);
      size_t Begin = Desc.Offset / L.Granularity;
      size_t End = Begin + (APC.Size + L.Granularity - 1) / L.Granularity;

      IRBuilder<> IRB(APC.InsBefore);
      copyToShadow(ShadowAfterScope,
                   APC.DoPoison ? ShadowAfterScope : ShadowInScope, Begin, End,
                   IRB, ShadowBase);
    }
  }

  SmallVector<uint8_t, 64> ShadowClean(ShadowAfterScope.size(), 0);
  SmallVector<uint8_t, 64> ShadowAfterReturn;

  // (Un)poison the stack before all ret instructions.
  for (auto Ret : RetVec) {
    IRBuilder<> IRBRet(Ret);
    // Mark the current frame as retired.
    IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
                       BasePlus0);
    if (DoStackMalloc) {
      assert(StackMallocIdx >= 0);
      // if FakeStack != 0  // LocalStackBase == FakeStack
      //     // In use-after-return mode, poison the whole stack frame.
      //     if StackMallocIdx <= 4
      //         // For small sizes inline the whole thing:
      //         memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize);
      //         **SavedFlagPtr(FakeStack) = 0
      //     else
      //         __asan_stack_free_N(FakeStack, LocalStackSize)
      // else
      //     <This is not a fake stack; unpoison the redzones>
      Value *Cmp =
          IRBRet.CreateICmpNE(FakeStack, Constant::getNullValue(IntptrTy));
      Instruction *ThenTerm, *ElseTerm;
      SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm);

      IRBuilder<> IRBPoison(ThenTerm);
      if (StackMallocIdx <= 4) {
        int ClassSize = kMinStackMallocSize << StackMallocIdx;
        ShadowAfterReturn.resize(ClassSize / L.Granularity,
                                 kAsanStackUseAfterReturnMagic);
        copyToShadow(ShadowAfterReturn, ShadowAfterReturn, IRBPoison,
                     ShadowBase);
        Value *SavedFlagPtrPtr = IRBPoison.CreateAdd(
            FakeStack,
            ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
        Value *SavedFlagPtr = IRBPoison.CreateLoad(
            IntptrTy, IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
        IRBPoison.CreateStore(
            Constant::getNullValue(IRBPoison.getInt8Ty()),
            IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getInt8PtrTy()));
      } else {
        // For larger frames call __asan_stack_free_*.
        IRBPoison.CreateCall(
            AsanStackFreeFunc[StackMallocIdx],
            {FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)});
      }

      IRBuilder<> IRBElse(ElseTerm);
      copyToShadow(ShadowAfterScope, ShadowClean, IRBElse, ShadowBase);
    } else {
      copyToShadow(ShadowAfterScope, ShadowClean, IRBRet, ShadowBase);
    }
  }

  // We are done. Remove the old unused alloca instructions.
  for (auto AI : AllocaVec) AI->eraseFromParent();
}

void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
                                         IRBuilder<> &IRB, bool DoPoison) {
  // For now just insert the call to the ASan runtime.
  Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
  Value *SizeArg = ConstantInt::get(IntptrTy, Size);
  IRB.CreateCall(
      DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
      {AddrArg, SizeArg});
}

// Handling llvm.lifetime intrinsics for a given %alloca:
// (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca.
// (2) if %size is constant, poison memory for llvm.lifetime.end (to detect
//     invalid accesses) and unpoison it for llvm.lifetime.start (the memory
//     could be poisoned by a previous llvm.lifetime.end instruction, as the
//     variable may go in and out of scope several times, e.g. in loops).
// (3) if we poisoned at least one %alloca in a function,
//     unpoison the whole stack frame at function exit.
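// In IR these intrinsics look roughly like
//   call void @llvm.lifetime.start.p0i8(i64 4, i8* %p)
//   ...
//   call void @llvm.lifetime.end.p0i8(i64 4, i8* %p)
// where the first argument is the %size referred to above.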
void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) {
  IRBuilder<> IRB(AI);

  const unsigned Align = std::max(kAllocaRzSize, AI->getAlignment());
  const uint64_t AllocaRedzoneMask = kAllocaRzSize - 1;

  Value *Zero = Constant::getNullValue(IntptrTy);
  Value *AllocaRzSize = ConstantInt::get(IntptrTy, kAllocaRzSize);
  Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);

  // Since we need to extend the alloca with additional memory to locate the
  // redzones, and OldSize is the number of allocated blocks of ElementSize
  // size, compute the allocated memory size in bytes as
  // OldSize * ElementSize.
  const unsigned ElementSize =
      F.getParent()->getDataLayout().getTypeAllocSize(AI->getAllocatedType());
  Value *OldSize =
      IRB.CreateMul(IRB.CreateIntCast(AI->getArraySize(), IntptrTy, false),
                    ConstantInt::get(IntptrTy, ElementSize));

  // PartialSize = OldSize % 32
  Value *PartialSize = IRB.CreateAnd(OldSize, AllocaRzMask);

  // Misalign = kAllocaRzSize - PartialSize;
  Value *Misalign = IRB.CreateSub(AllocaRzSize, PartialSize);

  // PartialPadding = Misalign != kAllocaRzSize ? Misalign : 0;
  Value *Cond = IRB.CreateICmpNE(Misalign, AllocaRzSize);
  Value *PartialPadding = IRB.CreateSelect(Cond, Misalign, Zero);

  // AdditionalChunkSize = Align + PartialPadding + kAllocaRzSize
  // Align is added to locate the left redzone, PartialPadding for a possible
  // partial redzone, and kAllocaRzSize for the right redzone respectively.
  Value *AdditionalChunkSize = IRB.CreateAdd(
      ConstantInt::get(IntptrTy, Align + kAllocaRzSize), PartialPadding);

  Value *NewSize = IRB.CreateAdd(OldSize, AdditionalChunkSize);

  // Insert a new alloca with the new NewSize and Align params.
  AllocaInst *NewAlloca = IRB.CreateAlloca(IRB.getInt8Ty(), NewSize);
  NewAlloca->setAlignment(Align);

  // NewAddress = Address + Align
  Value *NewAddress = IRB.CreateAdd(IRB.CreatePtrToInt(NewAlloca, IntptrTy),
                                    ConstantInt::get(IntptrTy, Align));

  // Insert __asan_alloca_poison call for the newly created alloca.
  IRB.CreateCall(AsanAllocaPoisonFunc, {NewAddress, OldSize});

  // Store the last alloca's address to DynamicAllocaLayout. We'll need this
  // for unpoisoning stuff.
  IRB.CreateStore(IRB.CreatePtrToInt(NewAlloca, IntptrTy), DynamicAllocaLayout);

  Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType());

  // Replace all uses of the address returned by the old alloca with
  // NewAddressPtr.
  AI->replaceAllUsesWith(NewAddressPtr);

  // We are done. Erase the old alloca from its parent.
  AI->eraseFromParent();
}
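
// The replacement allocation is laid out roughly as
//   [left rz: Align][user memory: OldSize][partial padding][right rz: kAllocaRzSize]
// and __asan_alloca_poison marks everything except the user region as
// unaddressable.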

// isSafeAccess returns true if Addr is always inbounds with respect to its
// base object. For example, it is a field access or an array access with a
// constant inbounds index.
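// E.g., for "struct S { int a; int b; } s;" a 4-byte store to s.b has
// Size 8 and Offset 4, so Offset >= 0, Size >= Offset and
// Size - Offset >= 4 all hold; the access is provably in bounds and can
// skip instrumentation.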
bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
                                    Value *Addr, uint64_t TypeSize) const {
  SizeOffsetType SizeOffset = ObjSizeVis.compute(Addr);
  if (!ObjSizeVis.bothKnown(SizeOffset)) return false;
  uint64_t Size = SizeOffset.first.getZExtValue();
  int64_t Offset = SizeOffset.second.getSExtValue();
  // Three checks are required to ensure safety:
  // . Offset >= 0  (since the offset is given from the base ptr)
  // . Size >= Offset  (unsigned)
  // . Size - Offset >= NeededSize  (unsigned)
  return Offset >= 0 && Size >= uint64_t(Offset) &&
         Size - uint64_t(Offset) >= TypeSize / 8;
}