Coverage Report

Created: 2022-05-14 11:35

/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/CodeGen/PatternInit.cpp
Line | Count | Source
   1 |       | //===--- PatternInit.cpp - Pattern Initialization -------------------------===//
   2 |       | //
   3 |       | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
   4 |       | // See https://llvm.org/LICENSE.txt for license information.
   5 |       | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
   6 |       | //
   7 |       | //===----------------------------------------------------------------------===//
   8 |       |
   9 |       | #include "PatternInit.h"
  10 |       | #include "CodeGenModule.h"
  11 |       | #include "clang/Basic/TargetInfo.h"
  12 |       | #include "llvm/IR/Constant.h"
  13 |       | #include "llvm/IR/Type.h"
  14 |       |
  15 |       | llvm::Constant *clang::CodeGen::initializationPatternFor(CodeGenModule &CGM,
  16 | 1.12k |                                                          llvm::Type *Ty) {
  17 |       |   // The following value is a guaranteed unmappable pointer value and has a
  18 |       |   // repeated byte-pattern which makes it easier to synthesize. We use it for
  19 |       |   // pointers as well as integers so that aggregates are likely to be
  20 |       |   // initialized with this repeated value.
  21 |       |   // For 32-bit platforms it's a bit trickier because, across systems, only the
  22 |       |   // zero page can reasonably be expected to be unmapped. We use max 0xFFFFFFFF
  23 |       |   // assuming that memory access will overlap into zero page.
  24 | 1.12k |   const uint64_t IntValue =
  25 | 1.12k |       CGM.getContext().getTargetInfo().getMaxPointerWidth() < 64
  26 | 1.12k |           ? 0xFFFFFFFFFFFFFFFFull
  27 | 1.12k |           : 0xAAAAAAAAAAAAAAAAull;
  28 |       |   // Floating-point values are initialized as NaNs because they propagate. Using
  29 |       |   // a repeated byte pattern means that it will be easier to initialize
  30 |       |   // all-floating-point aggregates and arrays with memset. Further, aggregates
  31 |       |   // which mix integral and a few floats might also initialize with memset
  32 |       |   // followed by a handful of stores for the floats. Using fairly unique NaNs
  33 |       |   // also means they'll be easier to distinguish in a crash.
  34 | 1.12k |   constexpr bool NegativeNaN = true;
  35 | 1.12k |   constexpr uint64_t NaNPayload = 0xFFFFFFFFFFFFFFFFull;
  36 | 1.12k |   if (Ty->isIntOrIntVectorTy()) {
  37 |   584 |     unsigned BitWidth =
  38 |   584 |         cast<llvm::IntegerType>(Ty->getScalarType())->getBitWidth();
  39 |   584 |     if (BitWidth <= 64)
  40 |   576 |       return llvm::ConstantInt::get(Ty, IntValue);
  41 |     8 |     return llvm::ConstantInt::get(
  42 |     8 |         Ty, llvm::APInt::getSplat(BitWidth, llvm::APInt(64, IntValue)));
  43 |   584 |   }
  44 |   540 |   if (Ty->isPtrOrPtrVectorTy()) {
  45 |    65 |     auto *PtrTy = cast<llvm::PointerType>(Ty->getScalarType());
  46 |    65 |     unsigned PtrWidth = CGM.getContext().getTargetInfo().getPointerWidth(
  47 |    65 |         PtrTy->getAddressSpace());
  48 |    65 |     if (PtrWidth > 64)
  49 |     0 |       llvm_unreachable("pattern initialization of unsupported pointer width");
  50 |    65 |     llvm::Type *IntTy = llvm::IntegerType::get(CGM.getLLVMContext(), PtrWidth);
  51 |    65 |     auto *Int = llvm::ConstantInt::get(IntTy, IntValue);
  52 |    65 |     return llvm::ConstantExpr::getIntToPtr(Int, PtrTy);
  53 |    65 |   }
  54 |   475 |   if (Ty->isFPOrFPVectorTy()) {
  55 |    94 |     unsigned BitWidth = llvm::APFloat::semanticsSizeInBits(
  56 |    94 |         Ty->getScalarType()->getFltSemantics());
  57 |    94 |     llvm::APInt Payload(64, NaNPayload);
  58 |    94 |     if (BitWidth >= 64)
  59 |    51 |       Payload = llvm::APInt::getSplat(BitWidth, Payload);
  60 |    94 |     return llvm::ConstantFP::getQNaN(Ty, NegativeNaN, &Payload);
  61 |    94 |   }
  62 |   381 |   if (Ty->isArrayTy()) {
  63 |       |     // Note: this doesn't touch tail padding (at the end of an object, before
  64 |       |     // the next array object). It is instead handled by replaceUndef.
  65 |   178 |     auto *ArrTy = cast<llvm::ArrayType>(Ty);
  66 |   178 |     llvm::SmallVector<llvm::Constant *, 8> Element(
  67 |   178 |         ArrTy->getNumElements(),
  68 |   178 |         initializationPatternFor(CGM, ArrTy->getElementType()));
  69 |   178 |     return llvm::ConstantArray::get(ArrTy, Element);
  70 |   178 |   }
  71 |       |
  72 |       |   // Note: this doesn't touch struct padding. It will initialize as much union
  73 |       |   // padding as is required for the largest type in the union. Padding is
  74 |       |   // instead handled by replaceUndef. Stores to structs with volatile members
  75 |       |   // don't have a volatile qualifier when initialized according to C++. This is
  76 |       |   // fine because stack-based volatiles don't really have volatile semantics
  77 |       |   // anyways, and the initialization shouldn't be observable.
  78 |   203 |   auto *StructTy = cast<llvm::StructType>(Ty);
  79 |   203 |   llvm::SmallVector<llvm::Constant *, 8> Struct(StructTy->getNumElements());
  80 |   604 |   for (unsigned El = 0; El != Struct.size(); ++El)
  81 |   401 |     Struct[El] = initializationPatternFor(CGM, StructTy->getElementType(El));
  82 |   203 |   return llvm::ConstantStruct::get(StructTy, Struct);
  83 |   381 | }

Notes: the only uncovered line is 49, the llvm_unreachable guard for pointers
wider than 64 bits. Sub-line region counts reported inline by the viewer: the
"? 0xFFFFFFFFFFFFFFFFull" arm of the conditional on line 26 executed 326 times,
the ": 0xAAAAAAAAAAAAAAAAull" arm on line 27 executed 798 times, and the "++El"
increment on line 80 executed 401 times.
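
For readers skimming the counts above, the following self-contained sketch (it
is not part of PatternInit.cpp or of this report) rebuilds the same kinds of
constants the function returns, using plain LLVM IR APIs. It assumes a 64-bit
target, i.e. getMaxPointerWidth() >= 64 so the 0xAA byte pattern is selected;
the main() driver, variable names, and the sample types i32/i128/double/i8*
are illustrative only, and the CodeGenModule query for target information is
deliberately omitted.

// Illustrative sketch, not part of PatternInit.cpp: builds the scalar
// pattern constants described in the coverage listing above, with the
// 64-bit pattern hard-coded instead of queried from the target.
#include "llvm/ADT/APInt.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"

#include <initializer_list>

int main() {
  llvm::LLVMContext Ctx;
  const uint64_t IntValue = 0xAAAAAAAAAAAAAAAAull;

  // Integers of at most 64 bits: the pattern is truncated to the type's
  // width, so an i32 becomes 0xAAAAAAAA (source lines 39-40).
  auto *I32 = llvm::IntegerType::get(Ctx, 32);
  llvm::Constant *Int32Pat = llvm::ConstantInt::get(I32, IntValue);

  // Wider integers: the 64-bit pattern is splatted across the full width
  // (source lines 41-42).
  auto *I128 = llvm::IntegerType::get(Ctx, 128);
  llvm::Constant *Int128Pat = llvm::ConstantInt::get(
      I128, llvm::APInt::getSplat(128, llvm::APInt(64, IntValue)));

  // Floating point: a negative quiet NaN carrying an all-ones payload
  // (source lines 54-60).
  llvm::APInt Payload(64, 0xFFFFFFFFFFFFFFFFull);
  llvm::Constant *DoublePat = llvm::ConstantFP::getQNaN(
      llvm::Type::getDoubleTy(Ctx), /*Negative=*/true, &Payload);

  // Pointers: the same integer pattern wrapped in a constant inttoptr
  // (source lines 50-52).
  llvm::Constant *PtrPat = llvm::ConstantExpr::getIntToPtr(
      llvm::ConstantInt::get(llvm::IntegerType::get(Ctx, 64), IntValue),
      llvm::Type::getInt8PtrTy(Ctx));

  for (llvm::Constant *C : {Int32Pat, Int128Pat, DoublePat, PtrPat}) {
    C->print(llvm::outs());
    llvm::outs() << "\n";
  }
  return 0;
}

Aggregate types never hit a scalar case directly: source lines 62-83 recurse
into array and struct element types, so these scalar patterns are what
ultimately fills arrays and structs as well.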