Coverage Report

Created: 2019-07-24 05:18

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
//===- HWAddressSanitizer.cpp - memory access error detector -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file is a part of HWAddressSanitizer, an address sanity checker
/// based on tagged addressing.
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h"
15
#include "llvm/ADT/SmallVector.h"
16
#include "llvm/ADT/StringExtras.h"
17
#include "llvm/ADT/StringRef.h"
18
#include "llvm/ADT/Triple.h"
19
#include "llvm/IR/Attributes.h"
20
#include "llvm/IR/BasicBlock.h"
21
#include "llvm/IR/Constant.h"
22
#include "llvm/IR/Constants.h"
23
#include "llvm/IR/DataLayout.h"
24
#include "llvm/IR/DebugInfoMetadata.h"
25
#include "llvm/IR/DerivedTypes.h"
26
#include "llvm/IR/Function.h"
27
#include "llvm/IR/IRBuilder.h"
28
#include "llvm/IR/InlineAsm.h"
29
#include "llvm/IR/InstVisitor.h"
30
#include "llvm/IR/Instruction.h"
31
#include "llvm/IR/Instructions.h"
32
#include "llvm/IR/IntrinsicInst.h"
33
#include "llvm/IR/Intrinsics.h"
34
#include "llvm/IR/LLVMContext.h"
35
#include "llvm/IR/MDBuilder.h"
36
#include "llvm/IR/Module.h"
37
#include "llvm/IR/Type.h"
38
#include "llvm/IR/Value.h"
39
#include "llvm/Pass.h"
40
#include "llvm/Support/Casting.h"
41
#include "llvm/Support/CommandLine.h"
42
#include "llvm/Support/Debug.h"
43
#include "llvm/Support/raw_ostream.h"
44
#include "llvm/Transforms/Instrumentation.h"
45
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
46
#include "llvm/Transforms/Utils/ModuleUtils.h"
47
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
48
#include <sstream>
49
50
using namespace llvm;
51
52
#define DEBUG_TYPE "hwasan"
53
54
static const char *const kHwasanModuleCtorName = "hwasan.module_ctor";
55
static const char *const kHwasanInitName = "__hwasan_init";
56
57
static const char *const kHwasanShadowMemoryDynamicAddress =
58
    "__hwasan_shadow_memory_dynamic_address";
59
60
// Accesses sizes are powers of two: 1, 2, 4, 8, 16.
61
static const size_t kNumberOfAccessSizes = 5;
62
63
static const size_t kDefaultShadowScale = 4;
64
static const uint64_t kDynamicShadowSentinel =
65
    std::numeric_limits<uint64_t>::max();
66
static const unsigned kPointerTagShift = 56;
67
68
static const unsigned kShadowBaseAlignment = 32;
69
70
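// Note on the constants above: with kPointerTagShift == 56 the tag occupies
// the top byte of a 64-bit pointer, which AArch64's Top-Byte-Ignore (TBI)
// feature lets the hardware disregard on loads and stores. For illustration,
// tagging 0x00007fff12345678 with tag 0x2a yields 0x2a007fff12345678, and
// untagging simply masks the top byte off again. kShadowBaseAlignment == 32
// means the dynamic shadow base is aligned to a 2^32-byte boundary (see
// emitPrologue below).
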
static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
    "hwasan-memory-access-callback-prefix",
    cl::desc("Prefix for memory access callbacks"), cl::Hidden,
    cl::init("__hwasan_"));

static cl::opt<bool>
    ClInstrumentWithCalls("hwasan-instrument-with-calls",
                          cl::desc("instrument reads and writes with callbacks"),
                          cl::Hidden, cl::init(false));

static cl::opt<bool> ClInstrumentReads("hwasan-instrument-reads",
                                       cl::desc("instrument read instructions"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool> ClInstrumentWrites(
    "hwasan-instrument-writes", cl::desc("instrument write instructions"),
    cl::Hidden, cl::init(true));

static cl::opt<bool> ClInstrumentAtomics(
    "hwasan-instrument-atomics",
    cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
    cl::init(true));

static cl::opt<bool> ClRecover(
    "hwasan-recover",
    cl::desc("Enable recovery mode (continue-after-error)."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClInstrumentStack("hwasan-instrument-stack",
                                       cl::desc("instrument stack (allocas)"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool> ClUARRetagToZero(
    "hwasan-uar-retag-to-zero",
    cl::desc("Clear alloca tags before returning from the function to allow "
             "mixing instrumented and non-instrumented function calls. When "
             "set to false, allocas are retagged before returning from the "
             "function to detect use after return."),
    cl::Hidden, cl::init(true));

static cl::opt<bool> ClGenerateTagsWithCalls(
    "hwasan-generate-tags-with-calls",
    cl::desc("generate new tags with runtime library calls"), cl::Hidden,
    cl::init(false));

static cl::opt<int> ClMatchAllTag(
    "hwasan-match-all-tag",
    cl::desc("don't report bad accesses via pointers with this tag"),
    cl::Hidden, cl::init(-1));

static cl::opt<bool> ClEnableKhwasan(
    "hwasan-kernel",
    cl::desc("Enable KernelHWAddressSanitizer instrumentation"),
    cl::Hidden, cl::init(false));

// These flags allow changing the shadow mapping and control how shadow memory
// is accessed. The shadow mapping looks like:
//    Shadow = (Mem >> scale) + offset

static cl::opt<uint64_t>
    ClMappingOffset("hwasan-mapping-offset",
                    cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"),
                    cl::Hidden, cl::init(0));

static cl::opt<bool>
    ClWithIfunc("hwasan-with-ifunc",
                cl::desc("Access dynamic shadow through an ifunc global on "
                         "platforms that support this"),
                cl::Hidden, cl::init(false));

static cl::opt<bool> ClWithTls(
    "hwasan-with-tls",
    cl::desc("Access dynamic shadow through a thread-local pointer on "
             "platforms that support this"),
    cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClRecordStackHistory("hwasan-record-stack-history",
                         cl::desc("Record stack frames with tagged allocations "
                                  "in a thread-local ring buffer"),
                         cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics",
                              cl::desc("instrument memory intrinsics"),
                              cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClInstrumentLandingPads("hwasan-instrument-landing-pads",
                            cl::desc("instrument landing pads"), cl::Hidden,
                            cl::init(true));

static cl::opt<bool> ClInlineAllChecks("hwasan-inline-all-checks",
                                       cl::desc("inline all checks"),
                                       cl::Hidden, cl::init(false));

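// For illustration of the mapping above with the default scale of 4: each
// 16-byte granule of application memory is described by one shadow byte, so
// an access to address Mem consults the byte at (Mem >> 4) + offset. E.g.
// with offset 0, the granule starting at 0x1000 is shadowed at 0x100.
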
namespace {

/// An instrumentation pass implementing detection of addressability bugs
/// using tagged pointers.
class HWAddressSanitizer {
public:
  explicit HWAddressSanitizer(Module &M, bool CompileKernel = false,
                              bool Recover = false) {
    this->Recover = ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover;
    this->CompileKernel = ClEnableKhwasan.getNumOccurrences() > 0 ?
        ClEnableKhwasan : CompileKernel;

    initializeModule(M);
  }

  bool sanitizeFunction(Function &F);
  void initializeModule(Module &M);

  void initializeCallbacks(Module &M);

  Value *getDynamicShadowIfunc(IRBuilder<> &IRB);
  Value *getDynamicShadowNonTls(IRBuilder<> &IRB);

  void untagPointerOperand(Instruction *I, Value *Addr);
  Value *shadowBase();
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
  void instrumentMemAccessInline(Value *Ptr, bool IsWrite,
                                 unsigned AccessSizeIndex,
                                 Instruction *InsertBefore);
  void instrumentMemIntrinsic(MemIntrinsic *MI);
  bool instrumentMemAccess(Instruction *I);
  Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
                                   uint64_t *TypeSize, unsigned *Alignment,
                                   Value **MaybeMask);

  bool isInterestingAlloca(const AllocaInst &AI);
  bool tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
  Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
  Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
  bool instrumentStack(
      SmallVectorImpl<AllocaInst *> &Allocas,
      DenseMap<AllocaInst *, std::vector<DbgDeclareInst *>> &AllocaDeclareMap,
      SmallVectorImpl<Instruction *> &RetVec, Value *StackTag);
  Value *readRegister(IRBuilder<> &IRB, StringRef Name);
  bool instrumentLandingPads(SmallVectorImpl<Instruction *> &RetVec);
  Value *getNextTagWithCall(IRBuilder<> &IRB);
  Value *getStackBaseTag(IRBuilder<> &IRB);
  Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, AllocaInst *AI,
                      unsigned AllocaNo);
  Value *getUARTag(IRBuilder<> &IRB, Value *StackTag);

  Value *getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty);
  void emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord);

private:
  LLVMContext *C;
  std::string CurModuleUniqueId;
  Triple TargetTriple;
  FunctionCallee HWAsanMemmove, HWAsanMemcpy, HWAsanMemset;
  FunctionCallee HWAsanHandleVfork;

  /// This struct defines the shadow mapping using the rule:
  ///   shadow = (mem >> Scale) + Offset.
  /// If InGlobal is true, then
  ///   extern char __hwasan_shadow[];
  ///   shadow = (mem >> Scale) + &__hwasan_shadow
  /// If InTls is true, then
  ///   extern char *__hwasan_tls;
  ///   shadow = (mem >> Scale) + align_up(__hwasan_tls, kShadowBaseAlignment)
  struct ShadowMapping {
    int Scale;
    uint64_t Offset;
    bool InGlobal;
    bool InTls;

    void init(Triple &TargetTriple);
    unsigned getAllocaAlignment() const { return 1U << Scale; }
  };
  ShadowMapping Mapping;

  Type *IntptrTy;
  Type *Int8PtrTy;
  Type *Int8Ty;
  Type *Int32Ty;

  bool CompileKernel;
  bool Recover;

  Function *HwasanCtorFunction;

  FunctionCallee HwasanMemoryAccessCallback[2][kNumberOfAccessSizes];
  FunctionCallee HwasanMemoryAccessCallbackSized[2];

  FunctionCallee HwasanTagMemoryFunc;
  FunctionCallee HwasanGenerateTagFunc;
  FunctionCallee HwasanThreadEnterFunc;

  Constant *ShadowGlobal;

  Value *LocalDynamicShadow = nullptr;
  Value *StackBaseTag = nullptr;
  GlobalValue *ThreadPtrGlobal = nullptr;
};

class HWAddressSanitizerLegacyPass : public FunctionPass {
public:
  // Pass identification, replacement for typeid.
  static char ID;

  explicit HWAddressSanitizerLegacyPass(bool CompileKernel = false,
                                        bool Recover = false)
      : FunctionPass(ID), CompileKernel(CompileKernel), Recover(Recover) {}

  StringRef getPassName() const override { return "HWAddressSanitizer"; }

  bool doInitialization(Module &M) override {
    HWASan = llvm::make_unique<HWAddressSanitizer>(M, CompileKernel, Recover);
    return true;
  }

  bool runOnFunction(Function &F) override {
    return HWASan->sanitizeFunction(F);
  }

  bool doFinalization(Module &M) override {
    HWASan.reset();
    return false;
  }

private:
  std::unique_ptr<HWAddressSanitizer> HWASan;
  bool CompileKernel;
  bool Recover;
};

} // end anonymous namespace

char HWAddressSanitizerLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(
    HWAddressSanitizerLegacyPass, "hwasan",
    "HWAddressSanitizer: detect memory bugs using tagged addressing.", false,
    false)
INITIALIZE_PASS_END(
    HWAddressSanitizerLegacyPass, "hwasan",
    "HWAddressSanitizer: detect memory bugs using tagged addressing.", false,
    false)

FunctionPass *llvm::createHWAddressSanitizerLegacyPassPass(bool CompileKernel,
                                                           bool Recover) {
  assert(!CompileKernel || Recover);
  return new HWAddressSanitizerLegacyPass(CompileKernel, Recover);
}

HWAddressSanitizerPass::HWAddressSanitizerPass(bool CompileKernel, bool Recover)
    : CompileKernel(CompileKernel), Recover(Recover) {}

PreservedAnalyses HWAddressSanitizerPass::run(Module &M,
                                              ModuleAnalysisManager &MAM) {
  HWAddressSanitizer HWASan(M, CompileKernel, Recover);
  bool Modified = false;
  for (Function &F : M)
    Modified |= HWASan.sanitizeFunction(F);
  if (Modified)
    return PreservedAnalyses::none();
  return PreservedAnalyses::all();
}

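// Usage sketch: the legacy pass is constructed through
// createHWAddressSanitizerLegacyPassPass(), typically driven by
// -fsanitize=hwaddress in clang; under the new pass manager the pass can be
// exercised with something like `opt -passes=hwasan` (assuming the usual
// registration of the "hwasan" pipeline name).
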
/// Module-level initialization.
///
/// Inserts a call to __hwasan_init into the module's constructor list.
void HWAddressSanitizer::initializeModule(Module &M) {
  LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n");
  auto &DL = M.getDataLayout();

  TargetTriple = Triple(M.getTargetTriple());

  Mapping.init(TargetTriple);

  C = &(M.getContext());
  CurModuleUniqueId = getUniqueModuleId(&M);
  IRBuilder<> IRB(*C);
  IntptrTy = IRB.getIntPtrTy(DL);
  Int8PtrTy = IRB.getInt8PtrTy();
  Int8Ty = IRB.getInt8Ty();
  Int32Ty = IRB.getInt32Ty();

  HwasanCtorFunction = nullptr;
  if (!CompileKernel) {
    std::tie(HwasanCtorFunction, std::ignore) =
        getOrCreateSanitizerCtorAndInitFunctions(
            M, kHwasanModuleCtorName, kHwasanInitName,
            /*InitArgTypes=*/{},
            /*InitArgs=*/{},
            // This callback is invoked when the functions are created the first
            // time. Hook them into the global ctors list in that case:
            [&](Function *Ctor, FunctionCallee) {
              Comdat *CtorComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
              Ctor->setComdat(CtorComdat);
              appendToGlobalCtors(M, Ctor, 0, Ctor);
            });
  }

  if (!TargetTriple.isAndroid()) {
    Constant *C = M.getOrInsertGlobal("__hwasan_tls", IntptrTy, [&] {
      auto *GV = new GlobalVariable(M, IntptrTy, /*isConstant=*/false,
                                    GlobalValue::ExternalLinkage, nullptr,
                                    "__hwasan_tls", nullptr,
                                    GlobalVariable::InitialExecTLSModel);
      appendToCompilerUsed(M, GV);
      return GV;
    });
    ThreadPtrGlobal = cast<GlobalVariable>(C);
  }
}

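// Note: the __hwasan_tls global above is only created for non-Android
// targets. On Android the per-thread state is reached through a fixed
// sanitizer TLS slot in Bionic instead; see getHwasanThreadSlotPtr below.
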
void HWAddressSanitizer::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
    const std::string TypeStr = AccessIsWrite ? "store" : "load";
    const std::string EndingStr = Recover ? "_noabort" : "";

    HwasanMemoryAccessCallbackSized[AccessIsWrite] = M.getOrInsertFunction(
        ClMemoryAccessCallbackPrefix + TypeStr + "N" + EndingStr,
        FunctionType::get(IRB.getVoidTy(), {IntptrTy, IntptrTy}, false));

    for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
         AccessSizeIndex++) {
      HwasanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
          M.getOrInsertFunction(
              ClMemoryAccessCallbackPrefix + TypeStr +
                  itostr(1ULL << AccessSizeIndex) + EndingStr,
              FunctionType::get(IRB.getVoidTy(), {IntptrTy}, false));
    }
  }

  HwasanTagMemoryFunc = M.getOrInsertFunction(
      "__hwasan_tag_memory", IRB.getVoidTy(), Int8PtrTy, Int8Ty, IntptrTy);
  HwasanGenerateTagFunc =
      M.getOrInsertFunction("__hwasan_generate_tag", Int8Ty);

  ShadowGlobal = M.getOrInsertGlobal("__hwasan_shadow",
                                     ArrayType::get(IRB.getInt8Ty(), 0));

  const std::string MemIntrinCallbackPrefix =
      CompileKernel ? std::string("") : ClMemoryAccessCallbackPrefix;
  HWAsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove",
                                        IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                                        IRB.getInt8PtrTy(), IntptrTy);
  HWAsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy",
                                       IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                                       IRB.getInt8PtrTy(), IntptrTy);
  HWAsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset",
                                       IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                                       IRB.getInt32Ty(), IntptrTy);

  HWAsanHandleVfork =
      M.getOrInsertFunction("__hwasan_handle_vfork", IRB.getVoidTy(), IntptrTy);

  HwasanThreadEnterFunc =
      M.getOrInsertFunction("__hwasan_thread_enter", IRB.getVoidTy());
}

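// For illustration, with the default "__hwasan_" prefix the loops above
// produce callbacks such as __hwasan_load1 .. __hwasan_load16 and
// __hwasan_store1 .. __hwasan_store16 (1ULL << AccessSizeIndex bytes each),
// plus the sized variants __hwasan_loadN/__hwasan_storeN; in recover mode
// each name gets a "_noabort" suffix.
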
Value *HWAddressSanitizer::getDynamicShadowIfunc(IRBuilder<> &IRB) {
  // An empty inline asm with input reg == output reg.
  // An opaque no-op cast, basically.
  InlineAsm *Asm = InlineAsm::get(
      FunctionType::get(Int8PtrTy, {ShadowGlobal->getType()}, false),
      StringRef(""), StringRef("=r,0"),
      /*hasSideEffects=*/false);
  return IRB.CreateCall(Asm, {ShadowGlobal}, ".hwasan.shadow");
}

Value *HWAddressSanitizer::getDynamicShadowNonTls(IRBuilder<> &IRB) {
  // Generate code only when dynamic addressing is needed.
  if (Mapping.Offset != kDynamicShadowSentinel)
    return nullptr;

  if (Mapping.InGlobal) {
    return getDynamicShadowIfunc(IRB);
  } else {
    Value *GlobalDynamicAddress =
        IRB.GetInsertBlock()->getParent()->getParent()->getOrInsertGlobal(
            kHwasanShadowMemoryDynamicAddress, Int8PtrTy);
    return IRB.CreateLoad(Int8PtrTy, GlobalDynamicAddress);
  }
}

Value *HWAddressSanitizer::isInterestingMemoryAccess(Instruction *I,
                                                     bool *IsWrite,
                                                     uint64_t *TypeSize,
                                                     unsigned *Alignment,
                                                     Value **MaybeMask) {
  // Skip memory accesses inserted by another instrumentation.
  if (I->getMetadata("nosanitize")) return nullptr;

  // Do not instrument the load fetching the dynamic shadow address.
  if (LocalDynamicShadow == I)
    return nullptr;

  Value *PtrOperand = nullptr;
  const DataLayout &DL = I->getModule()->getDataLayout();
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!ClInstrumentReads) return nullptr;
    *IsWrite = false;
    *TypeSize = DL.getTypeStoreSizeInBits(LI->getType());
    *Alignment = LI->getAlignment();
    PtrOperand = LI->getPointerOperand();
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!ClInstrumentWrites) return nullptr;
    *IsWrite = true;
    *TypeSize = DL.getTypeStoreSizeInBits(SI->getValueOperand()->getType());
    *Alignment = SI->getAlignment();
    PtrOperand = SI->getPointerOperand();
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    if (!ClInstrumentAtomics) return nullptr;
    *IsWrite = true;
    *TypeSize = DL.getTypeStoreSizeInBits(RMW->getValOperand()->getType());
    *Alignment = 0;
    PtrOperand = RMW->getPointerOperand();
  } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!ClInstrumentAtomics) return nullptr;
    *IsWrite = true;
    *TypeSize = DL.getTypeStoreSizeInBits(XCHG->getCompareOperand()->getType());
    *Alignment = 0;
    PtrOperand = XCHG->getPointerOperand();
  }

  if (PtrOperand) {
    // Do not instrument accesses from different address spaces; we cannot deal
    // with them.
    Type *PtrTy = cast<PointerType>(PtrOperand->getType()->getScalarType());
    if (PtrTy->getPointerAddressSpace() != 0)
      return nullptr;

    // Ignore swifterror addresses.
    // swifterror memory addresses are mem2reg promoted by instruction
    // selection. As such they cannot have regular uses like an instrumentation
    // function and it makes no sense to track them as memory.
    if (PtrOperand->isSwiftError())
      return nullptr;
  }

  return PtrOperand;
}

static unsigned getPointerOperandIndex(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperandIndex();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperandIndex();
  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I))
    return RMW->getPointerOperandIndex();
  if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I))
    return XCHG->getPointerOperandIndex();
  report_fatal_error("Unexpected instruction");
  return -1;
}

static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
  size_t Res = countTrailingZeros(TypeSize / 8);
  assert(Res < kNumberOfAccessSizes);
  return Res;
}

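// For illustration: a 32-bit (4-byte) access has TypeSize == 32, so
// countTrailingZeros(32 / 8) == 2, selecting index 2 and thus a callback
// such as __hwasan_load4. The assert holds for the supported access sizes
// of 1 to 16 bytes.
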
void HWAddressSanitizer::untagPointerOperand(Instruction *I, Value *Addr) {
  if (TargetTriple.isAArch64())
    return;

  IRBuilder<> IRB(I);
  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
  Value *UntaggedPtr =
      IRB.CreateIntToPtr(untagPointer(IRB, AddrLong), Addr->getType());
  I->setOperand(getPointerOperandIndex(I), UntaggedPtr);
}

Value *HWAddressSanitizer::shadowBase() {
  if (LocalDynamicShadow)
    return LocalDynamicShadow;
  return ConstantExpr::getIntToPtr(ConstantInt::get(IntptrTy, Mapping.Offset),
                                   Int8PtrTy);
}

Value *HWAddressSanitizer::memToShadow(Value *Mem, IRBuilder<> &IRB) {
  // Mem >> Scale
  Value *Shadow = IRB.CreateLShr(Mem, Mapping.Scale);
  if (Mapping.Offset == 0)
    return IRB.CreateIntToPtr(Shadow, Int8PtrTy);
  // (Mem >> Scale) + Offset
  return IRB.CreateGEP(Int8Ty, shadowBase(), Shadow);
}

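// For illustration: with Scale == 4 and a dynamic shadow base on a 64-bit
// target, memToShadow emits `lshr i64 %addr, 4` followed by a byte GEP off
// shadowBase(), i.e. the shadow byte for an untagged address A lives at
// ShadowBase + (A >> 4).
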
void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
                                                   unsigned AccessSizeIndex,
                                                   Instruction *InsertBefore) {
  const int64_t AccessInfo = Recover * 0x20 + IsWrite * 0x10 + AccessSizeIndex;
  IRBuilder<> IRB(InsertBefore);

  if (!ClInlineAllChecks && TargetTriple.isAArch64() &&
      TargetTriple.isOSBinFormatELF() && !Recover) {
    Module *M = IRB.GetInsertBlock()->getParent()->getParent();
    Ptr = IRB.CreateBitCast(Ptr, Int8PtrTy);
    IRB.CreateCall(
        Intrinsic::getDeclaration(M, Intrinsic::hwasan_check_memaccess),
        {shadowBase(), Ptr, ConstantInt::get(Int32Ty, AccessInfo)});
    return;
  }

  Value *PtrLong = IRB.CreatePointerCast(Ptr, IntptrTy);
  Value *PtrTag = IRB.CreateTrunc(IRB.CreateLShr(PtrLong, kPointerTagShift),
                                  IRB.getInt8Ty());
  Value *AddrLong = untagPointer(IRB, PtrLong);
  Value *Shadow = memToShadow(AddrLong, IRB);
  Value *MemTag = IRB.CreateLoad(Int8Ty, Shadow);
  Value *TagMismatch = IRB.CreateICmpNE(PtrTag, MemTag);

  int matchAllTag = ClMatchAllTag.getNumOccurrences() > 0 ?
      ClMatchAllTag : (CompileKernel ? 0xFF : -1);
  if (matchAllTag != -1) {
    Value *TagNotIgnored = IRB.CreateICmpNE(PtrTag,
        ConstantInt::get(PtrTag->getType(), matchAllTag));
    TagMismatch = IRB.CreateAnd(TagMismatch, TagNotIgnored);
  }

  Instruction *CheckTerm =
      SplitBlockAndInsertIfThen(TagMismatch, InsertBefore, false,
                                MDBuilder(*C).createBranchWeights(1, 100000));

  IRB.SetInsertPoint(CheckTerm);
  Value *OutOfShortGranuleTagRange =
      IRB.CreateICmpUGT(MemTag, ConstantInt::get(Int8Ty, 15));
  Instruction *CheckFailTerm =
      SplitBlockAndInsertIfThen(OutOfShortGranuleTagRange, CheckTerm, !Recover,
                                MDBuilder(*C).createBranchWeights(1, 100000));

  IRB.SetInsertPoint(CheckTerm);
  Value *PtrLowBits = IRB.CreateTrunc(IRB.CreateAnd(PtrLong, 15), Int8Ty);
  PtrLowBits = IRB.CreateAdd(
      PtrLowBits, ConstantInt::get(Int8Ty, (1 << AccessSizeIndex) - 1));
  Value *PtrLowBitsOOB = IRB.CreateICmpUGE(PtrLowBits, MemTag);
  SplitBlockAndInsertIfThen(PtrLowBitsOOB, CheckTerm, false,
                            MDBuilder(*C).createBranchWeights(1, 100000),
                            nullptr, nullptr, CheckFailTerm->getParent());

  IRB.SetInsertPoint(CheckTerm);
  Value *InlineTagAddr = IRB.CreateOr(AddrLong, 15);
  InlineTagAddr = IRB.CreateIntToPtr(InlineTagAddr, Int8PtrTy);
  Value *InlineTag = IRB.CreateLoad(Int8Ty, InlineTagAddr);
  Value *InlineTagMismatch = IRB.CreateICmpNE(PtrTag, InlineTag);
  SplitBlockAndInsertIfThen(InlineTagMismatch, CheckTerm, false,
                            MDBuilder(*C).createBranchWeights(1, 100000),
                            nullptr, nullptr, CheckFailTerm->getParent());

  IRB.SetInsertPoint(CheckFailTerm);
  InlineAsm *Asm;
  switch (TargetTriple.getArch()) {
    case Triple::x86_64:
      // The signal handler will find the data address in rdi.
      Asm = InlineAsm::get(
          FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false),
          "int3\nnopl " + itostr(0x40 + AccessInfo) + "(%rax)",
          "{rdi}",
          /*hasSideEffects=*/true);
      break;
    case Triple::aarch64:
    case Triple::aarch64_be:
      // The signal handler will find the data address in x0.
      Asm = InlineAsm::get(
          FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false),
          "brk #" + itostr(0x900 + AccessInfo),
          "{x0}",
          /*hasSideEffects=*/true);
      break;
    default:
      report_fatal_error("unsupported architecture");
  }
  IRB.CreateCall(Asm, PtrLong);
  if (Recover)
    cast<BranchInst>(CheckFailTerm)->setSuccessor(0, CheckTerm->getParent());
}

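// Note on the AccessInfo encoding used above: bit 5 is Recover, bit 4 is
// IsWrite, and bits 0-3 hold AccessSizeIndex. For illustration, a recoverable
// 8-byte store encodes as 0x20 + 0x10 + 3 = 0x33, reported on AArch64 as
// `brk #0x933`. The inlined check first compares the pointer tag against the
// shadow byte; shadow values 1..15 denote a short granule, in which case the
// shadow byte holds the granule's count of valid bytes and the real tag is
// stored in the granule's last data byte.
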
void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
  IRBuilder<> IRB(MI);
  if (isa<MemTransferInst>(MI)) {
    IRB.CreateCall(
        isa<MemMoveInst>(MI) ? HWAsanMemmove : HWAsanMemcpy,
        {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
         IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  } else if (isa<MemSetInst>(MI)) {
    IRB.CreateCall(
        HWAsanMemset,
        {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  }
  MI->eraseFromParent();
}

bool HWAddressSanitizer::instrumentMemAccess(Instruction *I) {
  LLVM_DEBUG(dbgs() << "Instrumenting: " << *I << "\n");
  bool IsWrite = false;
  unsigned Alignment = 0;
  uint64_t TypeSize = 0;
  Value *MaybeMask = nullptr;

  if (ClInstrumentMemIntrinsics && isa<MemIntrinsic>(I)) {
    instrumentMemIntrinsic(cast<MemIntrinsic>(I));
    return true;
  }

  Value *Addr =
      isInterestingMemoryAccess(I, &IsWrite, &TypeSize, &Alignment, &MaybeMask);

  if (!Addr)
    return false;

  if (MaybeMask)
    return false; // FIXME

  IRBuilder<> IRB(I);
  if (isPowerOf2_64(TypeSize) &&
      (TypeSize / 8 <= (1UL << (kNumberOfAccessSizes - 1))) &&
      (Alignment >= (1UL << Mapping.Scale) || Alignment == 0 ||
       Alignment >= TypeSize / 8)) {
    size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
    if (ClInstrumentWithCalls) {
      IRB.CreateCall(HwasanMemoryAccessCallback[IsWrite][AccessSizeIndex],
                     IRB.CreatePointerCast(Addr, IntptrTy));
    } else {
      instrumentMemAccessInline(Addr, IsWrite, AccessSizeIndex, I);
    }
  } else {
    IRB.CreateCall(HwasanMemoryAccessCallbackSized[IsWrite],
                   {IRB.CreatePointerCast(Addr, IntptrTy),
                    ConstantInt::get(IntptrTy, TypeSize / 8)});
  }
  untagPointerOperand(I, Addr);

  return true;
}

static uint64_t getAllocaSizeInBytes(const AllocaInst &AI) {
  uint64_t ArraySize = 1;
  if (AI.isArrayAllocation()) {
    const ConstantInt *CI = dyn_cast<ConstantInt>(AI.getArraySize());
    assert(CI && "non-constant array size");
    ArraySize = CI->getZExtValue();
  }
  Type *Ty = AI.getAllocatedType();
  uint64_t SizeInBytes = AI.getModule()->getDataLayout().getTypeAllocSize(Ty);
  return SizeInBytes * ArraySize;
}

bool HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI,
                                   Value *Tag, size_t Size) {
  size_t AlignedSize = alignTo(Size, Mapping.getAllocaAlignment());

  Value *JustTag = IRB.CreateTrunc(Tag, IRB.getInt8Ty());
  if (ClInstrumentWithCalls) {
    IRB.CreateCall(HwasanTagMemoryFunc,
                   {IRB.CreatePointerCast(AI, Int8PtrTy), JustTag,
                    ConstantInt::get(IntptrTy, AlignedSize)});
  } else {
    size_t ShadowSize = Size >> Mapping.Scale;
    Value *ShadowPtr = memToShadow(IRB.CreatePointerCast(AI, IntptrTy), IRB);
    // If this memset is not inlined, it will be intercepted in the hwasan
    // runtime library. That's OK, because the interceptor skips the checks if
    // the address is in the shadow region.
    // FIXME: the interceptor is not as fast as real memset. Consider lowering
    // llvm.memset right here into either a sequence of stores, or a call to
    // hwasan_tag_memory.
    if (ShadowSize)
      IRB.CreateMemSet(ShadowPtr, JustTag, ShadowSize, /*Align=*/1);
    if (Size != AlignedSize) {
      IRB.CreateStore(
          ConstantInt::get(Int8Ty, Size % Mapping.getAllocaAlignment()),
          IRB.CreateConstGEP1_32(Int8Ty, ShadowPtr, ShadowSize));
      IRB.CreateStore(JustTag, IRB.CreateConstGEP1_32(
                                   Int8Ty, IRB.CreateBitCast(AI, Int8PtrTy),
                                   AlignedSize - 1));
    }
  }
  return true;
}

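// Short granule example: a 40-byte alloca with Scale == 4 rounds up to
// AlignedSize == 48. The two full 16-byte granules get the tag via the
// memset (ShadowSize == 40 >> 4 == 2); the trailing partial granule's shadow
// byte gets 40 % 16 == 8, its count of valid bytes, while the real tag is
// stored in the granule's last data byte (offset 47), which is exactly where
// instrumentMemAccessInline's short-granule check looks.
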
static unsigned RetagMask(unsigned AllocaNo) {
  // A list of 8-bit numbers that have at most one run of non-zero bits.
  // x = x ^ (mask << 56) can be encoded as a single armv8 instruction for these
  // masks.
  // The list does not include the value 255, which is used for UAR.
  //
  // Because we are more likely to use earlier elements of this list than later
  // ones, it is sorted in increasing order of probability of collision with a
  // mask allocated (temporally) nearby. The program that generated this list
  // can be found at:
  // https://github.com/google/sanitizers/blob/master/hwaddress-sanitizer/sort_masks.py
  static unsigned FastMasks[] = {0,  128, 64,  192, 32,  96,  224, 112, 240,
                                 48, 16,  120, 248, 56,  24,  8,   124, 252,
                                 60, 28,  12,  4,   126, 254, 62,  30,  14,
                                 6,  2,   127, 63,  31,  15,  7,   3,   1};
  return FastMasks[AllocaNo % (sizeof(FastMasks) / sizeof(FastMasks[0]))];
}

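// For illustration: successive allocas in a frame get tags
// StackTag ^ RetagMask(N), so N = 0..3 selects the masks 0, 128, 64, 192.
// Each mask has at most one run of set bits, so (as the comment above notes)
// the xor at the tag position can be materialized by a single armv8 EOR with
// an immediate.
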
Value *HWAddressSanitizer::getNextTagWithCall(IRBuilder<> &IRB) {
  return IRB.CreateZExt(IRB.CreateCall(HwasanGenerateTagFunc), IntptrTy);
}

Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) {
  if (ClGenerateTagsWithCalls)
    return getNextTagWithCall(IRB);
  if (StackBaseTag)
    return StackBaseTag;
  // FIXME: use addressofreturnaddress (but implement it in aarch64 backend
  // first).
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  auto GetStackPointerFn = Intrinsic::getDeclaration(
      M, Intrinsic::frameaddress,
      IRB.getInt8PtrTy(M->getDataLayout().getAllocaAddrSpace()));
  Value *StackPointer = IRB.CreateCall(
      GetStackPointerFn, {Constant::getNullValue(IRB.getInt32Ty())});

  // Extract some entropy from the stack pointer for the tags.
  // Take bits 20..28 (ASLR entropy) and xor with bits 0..8 (these differ
  // between functions).
  Value *StackPointerLong = IRB.CreatePointerCast(StackPointer, IntptrTy);
  Value *StackTag =
      IRB.CreateXor(StackPointerLong, IRB.CreateLShr(StackPointerLong, 20),
                    "hwasan.stack.base.tag");
  return StackTag;
}

Value *HWAddressSanitizer::getAllocaTag(IRBuilder<> &IRB, Value *StackTag,
                                        AllocaInst *AI, unsigned AllocaNo) {
  if (ClGenerateTagsWithCalls)
    return getNextTagWithCall(IRB);
  return IRB.CreateXor(StackTag,
                       ConstantInt::get(IntptrTy, RetagMask(AllocaNo)));
}

Value *HWAddressSanitizer::getUARTag(IRBuilder<> &IRB, Value *StackTag) {
  if (ClUARRetagToZero)
    return ConstantInt::get(IntptrTy, 0);
  if (ClGenerateTagsWithCalls)
    return getNextTagWithCall(IRB);
  return IRB.CreateXor(StackTag, ConstantInt::get(IntptrTy, 0xFFU));
}

// Add a tag to an address.
Value *HWAddressSanitizer::tagPointer(IRBuilder<> &IRB, Type *Ty,
                                      Value *PtrLong, Value *Tag) {
  Value *TaggedPtrLong;
  if (CompileKernel) {
    // Kernel addresses have 0xFF in the most significant byte.
    Value *ShiftedTag = IRB.CreateOr(
        IRB.CreateShl(Tag, kPointerTagShift),
        ConstantInt::get(IntptrTy, (1ULL << kPointerTagShift) - 1));
    TaggedPtrLong = IRB.CreateAnd(PtrLong, ShiftedTag);
  } else {
    // Userspace can simply do OR (tag << 56);
    Value *ShiftedTag = IRB.CreateShl(Tag, kPointerTagShift);
    TaggedPtrLong = IRB.CreateOr(PtrLong, ShiftedTag);
  }
  return IRB.CreateIntToPtr(TaggedPtrLong, Ty);
}

// Remove tag from an address.
Value *HWAddressSanitizer::untagPointer(IRBuilder<> &IRB, Value *PtrLong) {
  Value *UntaggedPtrLong;
  if (CompileKernel) {
    // Kernel addresses have 0xFF in the most significant byte.
    UntaggedPtrLong = IRB.CreateOr(PtrLong,
        ConstantInt::get(PtrLong->getType(), 0xFFULL << kPointerTagShift));
  } else {
    // Userspace addresses have 0x00.
    UntaggedPtrLong = IRB.CreateAnd(PtrLong,
        ConstantInt::get(PtrLong->getType(), ~(0xFFULL << kPointerTagShift)));
  }
  return UntaggedPtrLong;
}

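// For illustration: in userspace, tagging pointer P with tag 0x2a computes
// P | (0x2a << 56), and untagging is P & ~(0xff << 56). In kernel mode
// pointers keep 0xff in the top byte, so tagging ANDs with
// (tag << 56) | 0x00ffffffffffffff and untagging ORs the top byte back to
// 0xff.
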
Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty) {
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  if (TargetTriple.isAArch64() && TargetTriple.isAndroid()) {
    // Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER
    // in Bionic's libc/private/bionic_tls.h.
    Function *ThreadPointerFunc =
        Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
    Value *SlotPtr = IRB.CreatePointerCast(
        IRB.CreateConstGEP1_32(IRB.getInt8Ty(),
                               IRB.CreateCall(ThreadPointerFunc), 0x30),
        Ty->getPointerTo(0));
    return SlotPtr;
  }
  if (ThreadPtrGlobal)
    return ThreadPtrGlobal;

  return nullptr;
}

void HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord) {
  if (!Mapping.InTls) {
    LocalDynamicShadow = getDynamicShadowNonTls(IRB);
    return;
  }

  if (!WithFrameRecord && TargetTriple.isAndroid()) {
    LocalDynamicShadow = getDynamicShadowIfunc(IRB);
    return;
  }

  Value *SlotPtr = getHwasanThreadSlotPtr(IRB, IntptrTy);
  assert(SlotPtr);

  Instruction *ThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);

  Function *F = IRB.GetInsertBlock()->getParent();
  if (F->getFnAttribute("hwasan-abi").getValueAsString() == "interceptor") {
    Value *ThreadLongEqZero =
        IRB.CreateICmpEQ(ThreadLong, ConstantInt::get(IntptrTy, 0));
    auto *Br = cast<BranchInst>(SplitBlockAndInsertIfThen(
        ThreadLongEqZero, cast<Instruction>(ThreadLongEqZero)->getNextNode(),
        false, MDBuilder(*C).createBranchWeights(1, 100000)));

    IRB.SetInsertPoint(Br);
    // FIXME: This should call a new runtime function with a custom calling
    // convention to avoid needing to spill all arguments here.
    IRB.CreateCall(HwasanThreadEnterFunc);
    LoadInst *ReloadThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);

    IRB.SetInsertPoint(&*Br->getSuccessor(0)->begin());
    PHINode *ThreadLongPhi = IRB.CreatePHI(IntptrTy, 2);
    ThreadLongPhi->addIncoming(ThreadLong, ThreadLong->getParent());
    ThreadLongPhi->addIncoming(ReloadThreadLong, ReloadThreadLong->getParent());
    ThreadLong = ThreadLongPhi;
  }

  // Extract the address field from ThreadLong. Unnecessary on AArch64 with TBI.
  Value *ThreadLongMaybeUntagged =
      TargetTriple.isAArch64() ? ThreadLong : untagPointer(IRB, ThreadLong);

  if (WithFrameRecord) {
    StackBaseTag = IRB.CreateAShr(ThreadLong, 3);

    // Prepare ring buffer data.
    Value *PC;
    if (TargetTriple.getArch() == Triple::aarch64)
      PC = readRegister(IRB, "pc");
    else
      PC = IRB.CreatePtrToInt(F, IntptrTy);
    Module *M = F->getParent();
    auto GetStackPointerFn = Intrinsic::getDeclaration(
        M, Intrinsic::frameaddress,
        IRB.getInt8PtrTy(M->getDataLayout().getAllocaAddrSpace()));
    Value *SP = IRB.CreatePtrToInt(
        IRB.CreateCall(GetStackPointerFn,
                       {Constant::getNullValue(IRB.getInt32Ty())}),
        IntptrTy);
    // Mix SP and PC.
    // Assumptions:
    // PC is 0x0000PPPPPPPPPPPP  (48 bits are meaningful, others are zero)
    // SP is 0xsssssssssssSSSS0  (4 lower bits are zero)
    // We only really need ~20 lower non-zero bits (SSSS), so we mix like this:
    //       0xSSSSPPPPPPPPPPPP
    SP = IRB.CreateShl(SP, 44);

    // Store data to ring buffer.
    Value *RecordPtr =
        IRB.CreateIntToPtr(ThreadLongMaybeUntagged, IntptrTy->getPointerTo(0));
    IRB.CreateStore(IRB.CreateOr(PC, SP), RecordPtr);

    // Update the ring buffer. Top byte of ThreadLong defines the size of the
    // buffer in pages, it must be a power of two, and the start of the buffer
    // must be aligned by twice that much. Therefore wrap around of the ring
    // buffer is simply Addr &= ~((ThreadLong >> 56) << 12).
    // The use of AShr instead of LShr is due to
    //   https://bugs.llvm.org/show_bug.cgi?id=39030
    // Runtime library makes sure not to use the highest bit.
    Value *WrapMask = IRB.CreateXor(
        IRB.CreateShl(IRB.CreateAShr(ThreadLong, 56), 12, "", true, true),
        ConstantInt::get(IntptrTy, (uint64_t)-1));
    Value *ThreadLongNew = IRB.CreateAnd(
        IRB.CreateAdd(ThreadLong, ConstantInt::get(IntptrTy, 8)), WrapMask);
    IRB.CreateStore(ThreadLongNew, SlotPtr);
  }

  // Get shadow base address by aligning RecordPtr up.
  // Note: this is not correct if the pointer is already aligned.
  // Runtime library will make sure this never happens.
  LocalDynamicShadow = IRB.CreateAdd(
      IRB.CreateOr(
          ThreadLongMaybeUntagged,
          ConstantInt::get(IntptrTy, (1ULL << kShadowBaseAlignment) - 1)),
      ConstantInt::get(IntptrTy, 1), "hwasan.shadow");
  LocalDynamicShadow = IRB.CreateIntToPtr(LocalDynamicShadow, Int8PtrTy);
}

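// Note on the final computation above: ORing ThreadLongMaybeUntagged with
// 2^32 - 1 and adding 1 rounds it up to the next multiple of
// 2^kShadowBaseAlignment, letting the shadow base and the ring buffer share
// one thread-local word. As the comment says, this is off by 2^32 for an
// already-aligned value, which the runtime rules out.
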
Value *HWAddressSanitizer::readRegister(IRBuilder<> &IRB, StringRef Name) {
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  Function *ReadRegister =
      Intrinsic::getDeclaration(M, Intrinsic::read_register, IntptrTy);
  MDNode *MD = MDNode::get(*C, {MDString::get(*C, Name)});
  Value *Args[] = {MetadataAsValue::get(*C, MD)};
  return IRB.CreateCall(ReadRegister, Args);
}

bool HWAddressSanitizer::instrumentLandingPads(
    SmallVectorImpl<Instruction *> &LandingPadVec) {
  for (auto *LP : LandingPadVec) {
    IRBuilder<> IRB(LP->getNextNode());
    IRB.CreateCall(
        HWAsanHandleVfork,
        {readRegister(IRB, (TargetTriple.getArch() == Triple::x86_64) ? "rsp"
                                                                      : "sp")});
  }
  return true;
}

bool HWAddressSanitizer::instrumentStack(
    SmallVectorImpl<AllocaInst *> &Allocas,
    DenseMap<AllocaInst *, std::vector<DbgDeclareInst *>> &AllocaDeclareMap,
    SmallVectorImpl<Instruction *> &RetVec, Value *StackTag) {
  // Ideally, we want to calculate the tagged stack base pointer and rewrite
  // all alloca addresses using that. Unfortunately, offsets are not known yet
  // (unless we use ASan-style mega-alloca). Instead we keep the base tag in a
  // temp, shift-OR it into each alloca address and xor with the retag mask.
  // This generates one extra instruction per alloca use.
  for (unsigned N = 0; N < Allocas.size(); ++N) {
    auto *AI = Allocas[N];
    IRBuilder<> IRB(AI->getNextNode());

    // Replace uses of the alloca with tagged address.
    Value *Tag = getAllocaTag(IRB, StackTag, AI, N);
    Value *AILong = IRB.CreatePointerCast(AI, IntptrTy);
    Value *Replacement = tagPointer(IRB, AI->getType(), AILong, Tag);
    std::string Name =
        AI->hasName() ? AI->getName().str() : "alloca." + itostr(N);
    Replacement->setName(Name + ".hwasan");

    for (auto UI = AI->use_begin(), UE = AI->use_end(); UI != UE;) {
      Use &U = *UI++;
      if (U.getUser() != AILong)
        U.set(Replacement);
    }

    for (auto *DDI : AllocaDeclareMap.lookup(AI)) {
      DIExpression *OldExpr = DDI->getExpression();
      DIExpression *NewExpr = DIExpression::append(
          OldExpr, {dwarf::DW_OP_LLVM_tag_offset, RetagMask(N)});
      DDI->setArgOperand(2, MetadataAsValue::get(*C, NewExpr));
    }

    size_t Size = getAllocaSizeInBytes(*AI);
    tagAlloca(IRB, AI, Tag, Size);

    for (auto RI : RetVec) {
      IRB.SetInsertPoint(RI);

      // Re-tag alloca memory with the special UAR tag.
      Value *Tag = getUARTag(IRB, StackTag);
      tagAlloca(IRB, AI, Tag, alignTo(Size, Mapping.getAllocaAlignment()));
    }
  }

  return true;
}

bool HWAddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
  return (AI.getAllocatedType()->isSized() &&
          // FIXME: instrument dynamic allocas, too
          AI.isStaticAlloca() &&
          // alloca() may be called with 0 size, ignore it.
          getAllocaSizeInBytes(AI) > 0 &&
          // We are only interested in allocas not promotable to registers.
          // Promotable allocas are common under -O0.
          !isAllocaPromotable(&AI) &&
          // inalloca allocas are not treated as static, and we don't want
          // dynamic alloca instrumentation for them as well.
          !AI.isUsedWithInAlloca() &&
          // swifterror allocas are register promoted by ISel
          !AI.isSwiftError());
}

bool HWAddressSanitizer::sanitizeFunction(Function &F) {
  if (&F == HwasanCtorFunction)
    return false;

  if (!F.hasFnAttribute(Attribute::SanitizeHWAddress))
    return false;

  LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");

  SmallVector<Instruction*, 16> ToInstrument;
  SmallVector<AllocaInst*, 8> AllocasToInstrument;
  SmallVector<Instruction*, 8> RetVec;
  SmallVector<Instruction*, 8> LandingPadVec;
  DenseMap<AllocaInst *, std::vector<DbgDeclareInst *>> AllocaDeclareMap;
  for (auto &BB : F) {
    for (auto &Inst : BB) {
      if (ClInstrumentStack)
        if (AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
          if (isInterestingAlloca(*AI))
            AllocasToInstrument.push_back(AI);
          continue;
        }

      if (isa<ReturnInst>(Inst) || isa<ResumeInst>(Inst) ||
          isa<CleanupReturnInst>(Inst))
        RetVec.push_back(&Inst);

      if (auto *DDI = dyn_cast<DbgDeclareInst>(&Inst))
        if (auto *Alloca = dyn_cast_or_null<AllocaInst>(DDI->getAddress()))
          AllocaDeclareMap[Alloca].push_back(DDI);

      if (ClInstrumentLandingPads && isa<LandingPadInst>(Inst))
        LandingPadVec.push_back(&Inst);

      Value *MaybeMask = nullptr;
      bool IsWrite;
      unsigned Alignment;
      uint64_t TypeSize;
      Value *Addr = isInterestingMemoryAccess(&Inst, &IsWrite, &TypeSize,
                                              &Alignment, &MaybeMask);
      if (Addr || isa<MemIntrinsic>(Inst))
        ToInstrument.push_back(&Inst);
    }
  }

  initializeCallbacks(*F.getParent());

  if (!LandingPadVec.empty())
    instrumentLandingPads(LandingPadVec);

  if (AllocasToInstrument.empty() && ToInstrument.empty())
    return false;

  assert(!LocalDynamicShadow);

  Instruction *InsertPt = &*F.getEntryBlock().begin();
  IRBuilder<> EntryIRB(InsertPt);
  emitPrologue(EntryIRB,
               /*WithFrameRecord*/ ClRecordStackHistory &&
                   !AllocasToInstrument.empty());

  bool Changed = false;
  if (!AllocasToInstrument.empty()) {
    Value *StackTag =
        ClGenerateTagsWithCalls ? nullptr : getStackBaseTag(EntryIRB);
    Changed |= instrumentStack(AllocasToInstrument, AllocaDeclareMap, RetVec,
                               StackTag);
  }

  // Pad and align each of the allocas that we instrumented to stop small
  // uninteresting allocas from hiding in instrumented alloca's padding and so
  // that we have enough space to store real tags for short granules.
  DenseMap<AllocaInst *, AllocaInst *> AllocaToPaddedAllocaMap;
  for (AllocaInst *AI : AllocasToInstrument) {
    uint64_t Size = getAllocaSizeInBytes(*AI);
    uint64_t AlignedSize = alignTo(Size, Mapping.getAllocaAlignment());
    AI->setAlignment(std::max(AI->getAlignment(), 16u));
    if (Size != AlignedSize) {
      Type *AllocatedType = AI->getAllocatedType();
      if (AI->isArrayAllocation()) {
        uint64_t ArraySize =
            cast<ConstantInt>(AI->getArraySize())->getZExtValue();
        AllocatedType = ArrayType::get(AllocatedType, ArraySize);
      }
      Type *TypeWithPadding = StructType::get(
          AllocatedType, ArrayType::get(Int8Ty, AlignedSize - Size));
      auto *NewAI = new AllocaInst(
          TypeWithPadding, AI->getType()->getAddressSpace(), nullptr, "", AI);
      NewAI->takeName(AI);
      NewAI->setAlignment(AI->getAlignment());
      NewAI->setUsedWithInAlloca(AI->isUsedWithInAlloca());
      NewAI->setSwiftError(AI->isSwiftError());
      NewAI->copyMetadata(*AI);
      auto *Bitcast = new BitCastInst(NewAI, AI->getType(), "", AI);
      AI->replaceAllUsesWith(Bitcast);
      AllocaToPaddedAllocaMap[AI] = NewAI;
    }
  }

  if (!AllocaToPaddedAllocaMap.empty()) {
    for (auto &BB : F)
      for (auto &Inst : BB)
        if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&Inst))
          if (auto *AI =
                  dyn_cast_or_null<AllocaInst>(DVI->getVariableLocation()))
            if (auto *NewAI = AllocaToPaddedAllocaMap.lookup(AI))
              DVI->setArgOperand(
                  0, MetadataAsValue::get(*C, LocalAsMetadata::get(NewAI)));
    for (auto &P : AllocaToPaddedAllocaMap)
      P.first->eraseFromParent();
  }

  // If we split the entry block, move any allocas that were originally in the
  // entry block back into the entry block so that they aren't treated as
  // dynamic allocas.
  if (EntryIRB.GetInsertBlock() != &F.getEntryBlock()) {
    InsertPt = &*F.getEntryBlock().begin();
    for (auto II = EntryIRB.GetInsertBlock()->begin(),
              IE = EntryIRB.GetInsertBlock()->end();
         II != IE;) {
      Instruction *I = &*II++;
      if (auto *AI = dyn_cast<AllocaInst>(I))
        if (isa<ConstantInt>(AI->getArraySize()))
          I->moveBefore(InsertPt);
    }
  }

  for (auto Inst : ToInstrument)
    Changed |= instrumentMemAccess(Inst);

  LocalDynamicShadow = nullptr;
  StackBaseTag = nullptr;

  return Changed;
}

void HWAddressSanitizer::ShadowMapping::init(Triple &TargetTriple) {
  Scale = kDefaultShadowScale;
  if (ClMappingOffset.getNumOccurrences() > 0) {
    InGlobal = false;
    InTls = false;
    Offset = ClMappingOffset;
  } else if (ClEnableKhwasan || ClInstrumentWithCalls) {
    InGlobal = false;
    InTls = false;
    Offset = 0;
  } else if (ClWithIfunc) {
    InGlobal = true;
    InTls = false;
    Offset = kDynamicShadowSentinel;
  } else if (ClWithTls) {
    InGlobal = false;
    InTls = true;
    Offset = kDynamicShadowSentinel;
  } else {
    InGlobal = false;
    InTls = false;
    Offset = kDynamicShadowSentinel;
  }
}