Coverage Report

Created: 2017-10-03 07:32

/Users/buildslave/jenkins/sharedspace/clang-stage2-coverage-R@2/llvm/lib/DebugInfo/CodeView/TypeSerializer.cpp
Line
Count
Source (jump to first uncovered line)
1
//===- TypeSerializer.cpp -------------------------------------------------===//
2
//
3
//                     The LLVM Compiler Infrastructure
4
//
5
// This file is distributed under the University of Illinois Open Source
6
// License. See LICENSE.TXT for details.
7
//
8
//===----------------------------------------------------------------------===//
9
10
#include "llvm/DebugInfo/CodeView/TypeSerializer.h"
11
#include "llvm/ADT/ArrayRef.h"
12
#include "llvm/ADT/DenseSet.h"
13
#include "llvm/ADT/STLExtras.h"
14
#include "llvm/DebugInfo/CodeView/CodeView.h"
15
#include "llvm/DebugInfo/CodeView/RecordSerialization.h"
16
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
17
#include "llvm/Support/Allocator.h"
18
#include "llvm/Support/BinaryByteStream.h"
19
#include "llvm/Support/BinaryStreamWriter.h"
20
#include "llvm/Support/Endian.h"
21
#include "llvm/Support/Error.h"
22
#include <algorithm>
23
#include <cassert>
24
#include <cstdint>
25
#include <cstring>
26
27
using namespace llvm;
28
using namespace llvm::codeview;
29
30
namespace {
31
32
struct HashedType {
33
  uint64_t Hash;
34
  const uint8_t *Data;
35
  unsigned Size; // FIXME: Go to uint16_t?
36
  TypeIndex Index;
37
};
38
39
/// Wrapper around a poitner to a HashedType. Hash and equality operations are
40
/// based on data in the pointee.
41
struct HashedTypePtr {
42
  HashedTypePtr() = default;
43
66.9k
  HashedTypePtr(HashedType *Ptr) : Ptr(Ptr) {}
44
45
  HashedType *Ptr = nullptr;
46
};
47
48
} // end anonymous namespace
49
50
namespace llvm {
51
52
template <> struct DenseMapInfo<HashedTypePtr> {
53
56.7k
  static inline HashedTypePtr getEmptyKey() { return HashedTypePtr(nullptr); }
54
55
7.74k
  static inline HashedTypePtr getTombstoneKey() {
56
7.74k
    return HashedTypePtr(reinterpret_cast<HashedType *>(1));
57
7.74k
  }
58
59
2.80k
  static unsigned getHashValue(HashedTypePtr Val) {
60
2.80k
    assert(Val.Ptr != getEmptyKey().Ptr && Val.Ptr != getTombstoneKey().Ptr);
61
2.80k
    return Val.Ptr->Hash;
62
2.80k
  }
63
64
50.6k
  static bool isEqual(HashedTypePtr LHSP, HashedTypePtr RHSP) {
65
50.6k
    HashedType *LHS = LHSP.Ptr;
66
50.6k
    HashedType *RHS = RHSP.Ptr;
67
50.6k
    if (
RHS == getEmptyKey().Ptr || 50.6k
RHS == getTombstoneKey().Ptr4.34k
)
68
49.4k
      return LHS == RHS;
69
1.15k
    
if (1.15k
LHS->Hash != RHS->Hash || 1.15k
LHS->Size != RHS->Size388
)
70
771
      return false;
71
388
    return ::memcmp(LHS->Data, RHS->Data, LHS->Size) == 0;
72
388
  }
73
};
74
75
} // end namespace llvm
76
77
/// Private implementation so that we don't leak our DenseMap instantiations to
78
/// users.
79
class llvm::codeview::TypeHasher {
80
private:
81
  /// Storage for type record provided by the caller. Records will outlive the
82
  /// hasher object, so they should be allocated here.
83
  BumpPtrAllocator &RecordStorage;
84
85
  /// Storage for hash keys. These only need to live as long as the hashing
86
  /// operation.
87
  BumpPtrAllocator KeyStorage;
88
89
  /// Hash table. We really want a DenseMap<ArrayRef<uint8_t>, TypeIndex> here,
90
  /// but DenseMap is inefficient when the keys are long (like type records)
91
  /// because it recomputes the hash value of every key when it grows. This
92
  /// value type stores the hash out of line in KeyStorage, so that table
93
  /// entries are small and easy to rehash.
94
  DenseSet<HashedTypePtr> HashedRecords;
95
96
public:
97
4.97k
  TypeHasher(BumpPtrAllocator &RecordStorage) : RecordStorage(RecordStorage) {}
98
99
0
  void reset() { HashedRecords.clear(); }
100
101
  /// Takes the bytes of type record, inserts them into the hash table, saves
102
  /// them, and returns a pointer to an identical stable type record along with
103
  /// its type index in the destination stream.
104
  TypeIndex getOrCreateRecord(ArrayRef<uint8_t> &Record, TypeIndex TI);
105
};
106
107
TypeIndex TypeHasher::getOrCreateRecord(ArrayRef<uint8_t> &Record,
                                        TypeIndex TI) {
  assert(Record.size() < UINT32_MAX && "Record too big");
  assert(Record.size() % 4 == 0 && "Record is not aligned to 4 bytes!");

  // Build a probe key on the stack. The hash is computed once up front and
  // stored in the key so the table never rehashes the (long) record bytes.
  HashedType ProbeKey = {hash_value(Record), Record.data(),
                         unsigned(Record.size()), TI};
  auto InsertResult = HashedRecords.insert(HashedTypePtr(&ProbeKey));
  HashedType *&Entry = InsertResult.first->Ptr;

  if (InsertResult.second) {
    // This was a new type record. We need stable storage for both the key and
    // the record. The record should outlive the hashing operation, so copy it
    // into RecordStorage and repoint the table entry away from the stack key.
    Entry = KeyStorage.Allocate<HashedType>();
    *Entry = ProbeKey;

    uint8_t *StableBytes = RecordStorage.Allocate<uint8_t>(Record.size());
    memcpy(StableBytes, Record.data(), Record.size());
    Entry->Data = StableBytes;
    assert(Entry->Size == Record.size());
  }

  // Update the caller's copy of Record to point at the stable copy.
  Record = ArrayRef<uint8_t>(Entry->Data, Entry->Size);
  return Entry->Index;
}
134
135
5.44k
// The next record appended will occupy array slot SeenRecords.size(), so its
// TypeIndex is derived directly from that count.
TypeIndex TypeSerializer::nextTypeIndex() const {
  return TypeIndex::fromArrayIndex(SeenRecords.size());
}
138
139
0
bool TypeSerializer::isInFieldList() const {
140
0
  return TypeKind.hasValue() && *TypeKind == TypeLeafKind::LF_FIELDLIST;
141
0
}
142
143
12.3k
// Bytes of the member sub-record currently being written: everything written
// so far minus the portion already committed to the current segment.
MutableArrayRef<uint8_t> TypeSerializer::getCurrentSubRecordData() {
  assert(isInFieldList());
  return getCurrentRecordData().drop_front(CurrentSegment.length());
}
147
148
14.1k
// All bytes written so far for the record currently being serialized.
MutableArrayRef<uint8_t> TypeSerializer::getCurrentRecordData() {
  return MutableArrayRef<uint8_t>(RecordBuffer).take_front(Writer.getOffset());
}
151
152
1.78k
// Emit a RecordPrefix for the given leaf kind with a zero-length placeholder;
// the real length is patched in once the record body is complete.
Error TypeSerializer::writeRecordPrefix(TypeLeafKind Kind) {
  RecordPrefix Prefix;
  Prefix.RecordKind = Kind;
  Prefix.RecordLen = 0; // Fixed up later, after the body is serialized.
  if (auto EC = Writer.writeObject(Prefix))
    return EC;
  return Error::success();
}
160
161
// Pad Record out to a 4-byte boundary using LF_PADn leaf bytes, written in
// descending order so the final pad byte is LF_PAD1. Returns the widened view
// covering the original bytes plus the padding.
Expected<MutableArrayRef<uint8_t>>
TypeSerializer::addPadding(MutableArrayRef<uint8_t> Record) {
  uint32_t Misalign = Record.size() % 4;
  if (Misalign == 0)
    return Record;

  int PaddingBytes = 4 - Misalign;
  for (int Remaining = PaddingBytes; Remaining > 0; --Remaining) {
    uint8_t Pad = static_cast<uint8_t>(LF_PAD0 + Remaining);
    if (auto EC = Writer.writeInteger(Pad))
      return std::move(EC);
  }
  return MutableArrayRef<uint8_t>(Record.data(), Record.size() + PaddingBytes);
}
177
178
TypeSerializer::TypeSerializer(BumpPtrAllocator &Storage, bool Hash)
    : RecordStorage(Storage), RecordBuffer(MaxRecordLength * 2),
      Stream(RecordBuffer, support::little), Writer(Stream),
      Mapping(Writer) {
  // RecordBuffer is sized to twice MaxRecordLength so that if we are 1 byte
  // short of MaxRecordLen and then try to write MaxRecordLen bytes, we still
  // won't overflow.
  if (Hash)
    Hasher = llvm::make_unique<TypeHasher>(Storage);
}
188
189
5.15k
// Defaulted out of line, where TypeHasher is a complete type, so that the
// unique_ptr<TypeHasher> member can be destroyed (pimpl idiom).
TypeSerializer::~TypeSerializer() = default;
190
191
1.49k
// View of every unique record inserted so far, in type-index order.
ArrayRef<ArrayRef<uint8_t>> TypeSerializer::records() const {
  return SeenRecords;
}
194
195
136
void TypeSerializer::reset() {
196
136
  if (Hasher)
197
0
    Hasher->reset();
198
136
  Writer.setOffset(0);
199
136
  CurrentSegment = RecordSegment();
200
136
  FieldListSegments.clear();
201
136
  TypeKind.reset();
202
136
  MemberKind.reset();
203
136
  SeenRecords.clear();
204
136
}
205
206
2.92k
// Insert a fully serialized record. With hashing enabled, duplicates collapse
// onto the first occurrence's index; either way, Record is repointed at a
// stable copy of the bytes on return.
TypeIndex TypeSerializer::insertRecordBytes(ArrayRef<uint8_t> &Record) {
  assert(!TypeKind.hasValue() && "Already in a type mapping!");
  assert(Writer.getOffset() == 0 && "Stream has data already!");

  if (Hasher) {
    // Deduplicate through the hash table. Only record the bytes in
    // SeenRecords when the hasher actually assigned the next fresh index,
    // i.e. this record was not seen before.
    TypeIndex ActualTI = Hasher->getOrCreateRecord(Record, nextTypeIndex());
    if (nextTypeIndex() == ActualTI)
      SeenRecords.push_back(Record);
    return ActualTI;
  }

  // Hashing disabled: treat every record as unique. Copy the bytes into
  // stable storage and hand back a fresh index.
  TypeIndex FreshTI = nextTypeIndex();
  uint8_t *StableBytes = RecordStorage.Allocate<uint8_t>(Record.size());
  memcpy(StableBytes, Record.data(), Record.size());
  Record = ArrayRef<uint8_t>(StableBytes, Record.size());
  SeenRecords.push_back(Record);
  return FreshTI;
}
224
225
1.00k
// Insert a record whose type-index references may need rewriting. Records
// with no remappings are inserted as-is; otherwise the bytes are copied into
// temporary storage, patched, and then inserted (so the hash table alone
// decides whether a stable copy is kept).
TypeIndex TypeSerializer::insertRecord(const RemappedType &Record) {
  assert(!TypeKind.hasValue() && "Already in a type mapping!");
  assert(Writer.getOffset() == 0 && "Stream has data already!");

  ArrayRef<uint8_t> OriginalData = Record.OriginalRecord.RecordData;
  if (Record.Mappings.empty()) {
    // This record did not remap any type indices.  Just write it.
    return insertRecordBytes(OriginalData);
  }

  // At least one type index was remapped.  Before we can hash it we have to
  // copy the full record bytes, re-write each type index, then hash the copy.
  // We do this in temporary storage since only the DenseMap can decide whether
  // this record already exists, and if it does we don't want the memory to
  // stick around.
  RemapStorage.resize(OriginalData.size());
  ::memcpy(&RemapStorage[0], OriginalData.data(), OriginalData.size());
  uint8_t *ContentBegin = RemapStorage.data() + sizeof(RecordPrefix);
  for (const auto &M : Record.Mappings) {
    // First 4 bytes of every record are the record prefix, but the mapping
    // offset is relative to the content which starts after.  Use memcpy
    // rather than a pointer cast-and-store: the offset is arbitrary, so the
    // destination need not be suitably aligned for a direct TypeIndex write.
    ::memcpy(ContentBegin + M.first, &M.second, sizeof(TypeIndex));
  }
  auto RemapRef = makeArrayRef(RemapStorage);
  return insertRecordBytes(RemapRef);
}
252
253
1.77k
// Begin serializing a new top-level type record: reserve space for the
// record prefix (length patched at end time) and enter the mapping.
Error TypeSerializer::visitTypeBegin(CVType &Record) {
  assert(!TypeKind.hasValue() && "Already in a type mapping!");
  assert(Writer.getOffset() == 0 && "Stream has data already!");

  if (auto EC = writeRecordPrefix(Record.kind()))
    return EC;

  TypeKind = Record.kind();
  if (auto EC = Mapping.visitTypeBegin(Record))
    return EC;

  return Error::success();
}
266
267
1.77k
// Finish the current record: pad it, patch its length prefix, insert it, and
// then flush any overflow field-list segments, chaining their continuation
// indices back to the record just inserted. Returns the final type index.
Expected<TypeIndex> TypeSerializer::visitTypeEndGetIndex(CVType &Record) {
  assert(TypeKind.hasValue() && "Not in a type mapping!");
  if (auto EC = Mapping.visitTypeEnd(Record))
    return std::move(EC);

  // Update the record's length and fill out the CVType members to point to
  // the stable memory holding the record's data.
  auto RecordBytes = getCurrentRecordData();
  auto Padded = addPadding(RecordBytes);
  if (!Padded)
    return Padded.takeError();
  RecordBytes = *Padded;

  // The stored length excludes the 16-bit length field itself.
  RecordPrefix *Prefix =
      reinterpret_cast<RecordPrefix *>(RecordBytes.data());
  Prefix->RecordLen = RecordBytes.size() - sizeof(uint16_t);

  Record.Type = *TypeKind;
  Record.RecordData = RecordBytes;

  // insertRecordBytes assumes we're not in a mapping, so do this first.
  TypeKind.reset();
  Writer.setOffset(0);

  TypeIndex ResultTI = insertRecordBytes(Record.RecordData);

  // Write out each additional segment in reverse order, and update each
  // record's continuation index to point to the previous one.
  for (auto Segment : reverse(FieldListSegments)) {
    auto CIBytes = Segment.take_back(sizeof(uint32_t));
    support::ulittle32_t *CI =
        reinterpret_cast<support::ulittle32_t *>(CIBytes.data());
    assert(*CI == 0xB0C0B0C0 && "Invalid TypeIndex placeholder");
    *CI = ResultTI.getIndex();
    ResultTI = insertRecordBytes(Segment);
  }

  FieldListSegments.clear();
  CurrentSegment.SubRecords.clear();

  return ResultTI;
}
309
310
136
// Same as visitTypeEndGetIndex, but the assigned index is discarded.
Error TypeSerializer::visitTypeEnd(CVType &Record) {
  auto MaybeIndex = visitTypeEndGetIndex(Record);
  if (!MaybeIndex)
    return MaybeIndex.takeError();
  return Error::success();
}
316
317
6.18k
// Begin serializing one member sub-record of the current LF_FIELDLIST.
Error TypeSerializer::visitMemberBegin(CVMemberRecord &Record) {
  assert(isInFieldList() && "Not in a field list!");
  assert(!MemberKind.hasValue() && "Already in a member record!");
  MemberKind = Record.Kind;

  if (auto EC = Mapping.visitMemberBegin(Record))
    return EC;

  return Error::success();
}
327
328
6.18k
// Finish one member sub-record.  If the member made the in-progress segment
// too large to fit in a single 64K CodeView record (leaving room for a
// continuation), the committed portion is sealed off into its own stable
// LF_FIELDLIST segment terminated by an LF_INDEX continuation, and the
// overflowing member is moved to the start of a fresh segment.
Error TypeSerializer::visitMemberEnd(CVMemberRecord &Record) {
  if (auto EC = Mapping.visitMemberEnd(Record))
    return EC;

  // Check if this subrecord makes the current segment not fit in 64K minus
  // the space for a continuation record (8 bytes). If the segment does not
  // fit, insert a continuation record.
  if (Writer.getOffset() > MaxRecordLength - ContinuationLength) {
    MutableArrayRef<uint8_t> Buffer = getCurrentRecordData();
    SubRecord Overflowed = CurrentSegment.SubRecords.back();
    uint32_t KeepSize = CurrentSegment.length() - Overflowed.Size;
    auto Kept = Buffer.take_front(KeepSize);
    auto Spill = Buffer.drop_front(KeepSize);
    assert(Overflowed.Size == Spill.size());

    // Allocate stable storage for the record and copy the old record plus
    // continuation over.
    uint16_t SegmentLen = KeepSize + ContinuationLength;
    assert(SegmentLen <= MaxRecordLength);
    RecordPrefix *Prefix = reinterpret_cast<RecordPrefix *>(Kept.data());
    Prefix->RecordLen = SegmentLen - sizeof(uint16_t);

    uint8_t *StableBytes = RecordStorage.Allocate<uint8_t>(SegmentLen);
    auto Segment = MutableArrayRef<uint8_t>(StableBytes, SegmentLen);
    MutableBinaryByteStream SegmentStream(Segment, support::little);
    BinaryStreamWriter SegmentWriter(SegmentStream);
    if (auto EC = SegmentWriter.writeBytes(Kept))
      return EC;
    // LF_INDEX leaf + pad + placeholder continuation index; the placeholder
    // is patched with the real TypeIndex in visitTypeEndGetIndex.
    if (auto EC = SegmentWriter.writeEnum(TypeLeafKind::LF_INDEX))
      return EC;
    if (auto EC = SegmentWriter.writeInteger<uint16_t>(0))
      return EC;
    if (auto EC = SegmentWriter.writeInteger<uint32_t>(0xB0C0B0C0))
      return EC;
    FieldListSegments.push_back(Segment);

    // Write a new placeholder record prefix to mark the start of this new
    // top-level record.
    Writer.setOffset(0);
    if (auto EC = writeRecordPrefix(TypeLeafKind::LF_FIELDLIST))
      return EC;

    // Then move over the subrecord that overflowed the old segment to the
    // beginning of this segment.  Note that we have to use memmove here
    // instead of Writer.writeBytes(), because the new and old locations
    // could overlap.
    ::memmove(Stream.data().data() + sizeof(RecordPrefix), Spill.data(),
              Spill.size());
    // And point the segment writer at the end of that subrecord.
    Writer.setOffset(Spill.size() + sizeof(RecordPrefix));

    CurrentSegment.SubRecords.clear();
    CurrentSegment.SubRecords.push_back(Overflowed);
  }

  // Update the CVMemberRecord since we may have shifted around or gotten
  // padded.
  Record.Data = getCurrentSubRecordData();

  MemberKind.reset();
  return Error::success();
}