Coverage Report

Created: 2020-09-19 12:23

/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp
Line
Count
Source (jump to first uncovered line)
1
//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
//
9
// This is the code that handles AST -> LLVM type lowering.
10
//
11
//===----------------------------------------------------------------------===//
12
13
#include "CodeGenTypes.h"
14
#include "CGCXXABI.h"
15
#include "CGCall.h"
16
#include "CGOpenCLRuntime.h"
17
#include "CGRecordLayout.h"
18
#include "TargetInfo.h"
19
#include "clang/AST/ASTContext.h"
20
#include "clang/AST/DeclCXX.h"
21
#include "clang/AST/DeclObjC.h"
22
#include "clang/AST/Expr.h"
23
#include "clang/AST/RecordLayout.h"
24
#include "clang/CodeGen/CGFunctionInfo.h"
25
#include "llvm/IR/DataLayout.h"
26
#include "llvm/IR/DerivedTypes.h"
27
#include "llvm/IR/Module.h"
28
using namespace clang;
29
using namespace CodeGen;
30
31
CodeGenTypes::CodeGenTypes(CodeGenModule &cgm)
32
  : CGM(cgm), Context(cgm.getContext()), TheModule(cgm.getModule()),
33
    Target(cgm.getTarget()), TheCXXABI(cgm.getCXXABI()),
34
29.7k
    TheABIInfo(cgm.getTargetCodeGenInfo().getABIInfo()) {
35
29.7k
  SkippedLayout = false;
36
29.7k
}
37
38
29.7k
CodeGenTypes::~CodeGenTypes() {
39
29.7k
  for (llvm::FoldingSet<CGFunctionInfo>::iterator
40
233k
       I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; )
41
204k
    delete &*I++;
42
29.7k
}
43
44
11.8k
const CodeGenOptions &CodeGenTypes::getCodeGenOpts() const {
45
11.8k
  return CGM.getCodeGenOpts();
46
11.8k
}
47
48
void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
49
                                     llvm::StructType *Ty,
50
108k
                                     StringRef suffix) {
51
108k
  SmallString<256> TypeName;
52
108k
  llvm::raw_svector_ostream OS(TypeName);
53
108k
  OS << RD->getKindName() << '.';
54
108k
55
  // Name the codegen type after the typedef name
56
  // if there is no tag type name available
57
108k
  if (RD->getIdentifier()) {
58
    // FIXME: We should not have to check for a null decl context here.
59
    // Right now we do it because the implicit Obj-C decls don't have one.
60
102k
    if (RD->getDeclContext())
61
102k
      RD->printQualifiedName(OS);
62
0
    else
63
0
      RD->printName(OS);
64
6.09k
  } else if (const TypedefNameDecl *TDD = RD->getTypedefNameForAnonDecl()) {
65
    // FIXME: We should not have to check for a null decl context here.
66
    // Right now we do it because the implicit Obj-C decls don't have one.
67
1.89k
    if (TDD->getDeclContext())
68
1.89k
      TDD->printQualifiedName(OS);
69
0
    else
70
0
      TDD->printName(OS);
71
1.89k
  } else
72
4.20k
    OS << "anon";
73
108k
74
108k
  if (!suffix.empty())
75
7.55k
    OS << suffix;
76
108k
77
108k
  Ty->setName(OS.str());
78
108k
}
79
80
/// ConvertTypeForMem - Convert type T into a llvm::Type.  This differs from
81
/// ConvertType in that it is used to convert to the memory representation for
82
/// a type.  For example, the scalar representation for _Bool is i1, but the
83
/// memory representation is usually i8 or i32, depending on the target.
84
1.71M
llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T, bool ForBitField) {
85
1.71M
  if (T->isConstantMatrixType()) {
86
287
    const Type *Ty = Context.getCanonicalType(T).getTypePtr();
87
287
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
88
287
    return llvm::ArrayType::get(ConvertType(MT->getElementType()),
89
287
                                MT->getNumRows() * MT->getNumColumns());
90
287
  }
91
1.71M
92
1.71M
  llvm::Type *R = ConvertType(T);
93
1.71M
94
  // If this is a bool type, or an ExtIntType in a bitfield representation,
95
  // map this integer to the target-specified size.
96
1.71M
  if ((ForBitField && 
T->isExtIntType()117
) ||
97
1.71M
      (!T->isExtIntType() && 
R->isIntegerTy(1)1.71M
))
98
12.8k
    return llvm::IntegerType::get(getLLVMContext(),
99
12.8k
                                  (unsigned)Context.getTypeSize(T));
100
1.70M
101
  // Else, don't map it.
102
1.70M
  return R;
103
1.70M
}
104
105
/// isRecordLayoutComplete - Return true if the specified type is already
106
/// completely laid out.
107
580k
bool CodeGenTypes::isRecordLayoutComplete(const Type *Ty) const {
108
580k
  llvm::DenseMap<const Type*, llvm::StructType *>::const_iterator I =
109
580k
  RecordDeclTypes.find(Ty);
110
580k
  return I != RecordDeclTypes.end() && 
!I->second->isOpaque()28.3k
;
111
580k
}
112
113
static bool
114
isSafeToConvert(QualType T, CodeGenTypes &CGT,
115
                llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked);
116
117
118
/// isSafeToConvert - Return true if it is safe to convert the specified record
119
/// decl to IR and lay it out, false if doing so would cause us to get into a
120
/// recursive compilation mess.
121
static bool
122
isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT,
123
581k
                llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) {
124
  // If we have already checked this type (maybe the same type is used by-value
125
  // multiple times in multiple structure fields, don't check again.
126
581k
  if (!AlreadyChecked.insert(RD).second)
127
780
    return true;
128
580k
129
580k
  const Type *Key = CGT.getContext().getTagDeclType(RD).getTypePtr();
130
580k
131
  // If this type is already laid out, converting it is a noop.
132
580k
  if (CGT.isRecordLayoutComplete(Key)) 
return true2.40k
;
133
577k
134
  // If this type is currently being laid out, we can't recursively compile it.
135
577k
  if (CGT.isRecordBeingLaidOut(Key))
136
610
    return false;
137
577k
138
  // If this type would require laying out bases that are currently being laid
139
  // out, don't do it.  This includes virtual base classes which get laid out
140
  // when a class is translated, even though they aren't embedded by-value into
141
  // the class.
142
577k
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
143
575k
    for (const auto &I : CRD->bases())
144
542k
      if (!isSafeToConvert(I.getType()->castAs<RecordType>()->getDecl(), CGT,
145
542k
                           AlreadyChecked))
146
23
        return false;
147
575k
  }
148
577k
149
  // If this type would require laying out members that are currently being laid
150
  // out, don't do it.
151
577k
  for (const auto *I : RD->fields())
152
55.5k
    if (!isSafeToConvert(I->getType(), CGT, AlreadyChecked))
153
5
      return false;
154
577k
155
  // If there are no problems, lets do it.
156
577k
  return true;
157
577k
}
158
159
/// isSafeToConvert - Return true if it is safe to convert this field type,
160
/// which requires the structure elements contained by-value to all be
161
/// recursively safe to convert.
162
static bool
163
isSafeToConvert(QualType T, CodeGenTypes &CGT,
164
60.3k
                llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) {
165
  // Strip off atomic type sugar.
166
60.3k
  if (const auto *AT = T->getAs<AtomicType>())
167
615
    T = AT->getValueType();
168
60.3k
169
  // If this is a record, check it.
170
60.3k
  if (const auto *RT = T->getAs<RecordType>())
171
12.6k
    return isSafeToConvert(RT->getDecl(), CGT, AlreadyChecked);
172
47.7k
173
  // If this is an array, check the elements, which are embedded inline.
174
47.7k
  if (const auto *AT = CGT.getContext().getAsArrayType(T))
175
4.78k
    return isSafeToConvert(AT->getElementType(), CGT, AlreadyChecked);
176
42.9k
177
  // Otherwise, there is no concern about transforming this.  We only care about
178
  // things that are contained by-value in a structure that can have another
179
  // structure as a member.
180
42.9k
  return true;
181
42.9k
}
182
183
184
/// isSafeToConvert - Return true if it is safe to convert the specified record
185
/// decl to IR and lay it out, false if doing so would cause us to get into a
186
/// recursive compilation mess.
187
107k
static bool isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT) {
188
  // If no structs are being laid out, we can certainly do this one.
189
107k
  if (CGT.noRecordsBeingLaidOut()) 
return true81.0k
;
190
25.9k
191
25.9k
  llvm::SmallPtrSet<const RecordDecl*, 16> AlreadyChecked;
192
25.9k
  return isSafeToConvert(RD, CGT, AlreadyChecked);
193
25.9k
}
194
195
/// isFuncParamTypeConvertible - Return true if the specified type in a
196
/// function parameter or result position can be converted to an IR type at this
197
/// point.  This boils down to being whether it is complete, as well as whether
198
/// we've temporarily deferred expanding the type because we're in a recursive
199
/// context.
200
139k
bool CodeGenTypes::isFuncParamTypeConvertible(QualType Ty) {
201
  // Some ABIs cannot have their member pointers represented in IR unless
202
  // certain circumstances have been reached.
203
139k
  if (const auto *MPT = Ty->getAs<MemberPointerType>())
204
52
    return getCXXABI().isMemberPointerConvertible(MPT);
205
139k
206
  // If this isn't a tagged type, we can convert it!
207
139k
  const TagType *TT = Ty->getAs<TagType>();
208
139k
  if (!TT) 
return true131k
;
209
7.46k
210
  // Incomplete types cannot be converted.
211
7.46k
  if (TT->isIncompleteType())
212
78
    return false;
213
7.38k
214
  // If this is an enum, then it is always safe to convert.
215
7.38k
  const RecordType *RT = dyn_cast<RecordType>(TT);
216
7.38k
  if (!RT) 
return true526
;
217
6.86k
218
  // Otherwise, we have to be careful.  If it is a struct that we're in the
219
  // process of expanding, then we can't convert the function type.  That's ok
220
  // though because we must be in a pointer context under the struct, so we can
221
  // just convert it to a dummy type.
222
  //
223
  // We decide this by checking whether ConvertRecordDeclType returns us an
224
  // opaque type for a struct that we know is defined.
225
6.86k
  return isSafeToConvert(RT->getDecl(), *this);
226
6.86k
}
227
228
229
/// Code to verify a given function type is complete, i.e. the return type
230
/// and all of the parameter types are complete.  Also check to see if we are in
231
/// a RS_StructPointer context, and if so whether any struct types have been
232
/// pended.  If so, we don't want to ask the ABI lowering code to handle a type
233
/// that cannot be converted to an IR type.
234
59.5k
bool CodeGenTypes::isFuncTypeConvertible(const FunctionType *FT) {
235
59.5k
  if (!isFuncParamTypeConvertible(FT->getReturnType()))
236
17
    return false;
237
59.5k
238
59.5k
  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
239
138k
    
for (unsigned i = 0, e = FPT->getNumParams(); 58.9k
i != e;
i++79.7k
)
240
79.8k
      if (!isFuncParamTypeConvertible(FPT->getParamType(i)))
241
66
        return false;
242
59.5k
243
59.4k
  return true;
244
59.5k
}
245
246
/// UpdateCompletedType - When we find the full definition for a TagDecl,
247
/// replace the 'opaque' type we previously made for it if applicable.
248
2.29M
void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
249
  // If this is an enum being completed, then we flush all non-struct types from
250
  // the cache.  This allows function types and other things that may be derived
251
  // from the enum to be recomputed.
252
2.29M
  if (const EnumDecl *ED = dyn_cast<EnumDecl>(TD)) {
253
    // Only flush the cache if we've actually already converted this type.
254
578k
    if (TypeCache.count(ED->getTypeForDecl())) {
255
      // Okay, we formed some types based on this.  We speculated that the enum
256
      // would be lowered to i32, so we only need to flush the cache if this
257
      // didn't happen.
258
1
      if (!ConvertType(ED->getIntegerType())->isIntegerTy(32))
259
0
        TypeCache.clear();
260
1
    }
261
    // If necessary, provide the full definition of a type only used with a
262
    // declaration so far.
263
578k
    if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
264
559k
      DI->completeType(ED);
265
578k
    return;
266
578k
  }
267
1.71M
268
  // If we completed a RecordDecl that we previously used and converted to an
269
  // anonymous type, then go ahead and complete it now.
270
1.71M
  const RecordDecl *RD = cast<RecordDecl>(TD);
271
1.71M
  if (RD->isDependentType()) 
return318k
;
272
1.39M
273
  // Only complete it if we converted it already.  If we haven't converted it
274
  // yet, we'll just do it lazily.
275
1.39M
  if (RecordDeclTypes.count(Context.getTagDeclType(RD).getTypePtr()))
276
673
    ConvertRecordDeclType(RD);
277
1.39M
278
  // If necessary, provide the full definition of a type only used with a
279
  // declaration so far.
280
1.39M
  if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
281
1.28M
    DI->completeType(RD);
282
1.39M
}
283
284
460
void CodeGenTypes::RefreshTypeCacheForClass(const CXXRecordDecl *RD) {
285
460
  QualType T = Context.getRecordType(RD);
286
460
  T = Context.getCanonicalType(T);
287
460
288
460
  const Type *Ty = T.getTypePtr();
289
460
  if (RecordsWithOpaqueMemberPointers.count(Ty)) {
290
8
    TypeCache.clear();
291
8
    RecordsWithOpaqueMemberPointers.clear();
292
8
  }
293
460
}
294
295
static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
296
                                    const llvm::fltSemantics &format,
297
9.97k
                                    bool UseNativeHalf = false) {
298
9.97k
  if (&format == &llvm::APFloat::IEEEhalf()) {
299
552
    if (UseNativeHalf)
300
550
      return llvm::Type::getHalfTy(VMContext);
301
2
    else
302
2
      return llvm::Type::getInt16Ty(VMContext);
303
9.42k
  }
304
9.42k
  if (&format == &llvm::APFloat::BFloat())
305
172
    return llvm::Type::getBFloatTy(VMContext);
306
9.25k
  if (&format == &llvm::APFloat::IEEEsingle())
307
4.12k
    return llvm::Type::getFloatTy(VMContext);
308
5.12k
  if (&format == &llvm::APFloat::IEEEdouble())
309
4.73k
    return llvm::Type::getDoubleTy(VMContext);
310
390
  if (&format == &llvm::APFloat::IEEEquad())
311
83
    return llvm::Type::getFP128Ty(VMContext);
312
307
  if (&format == &llvm::APFloat::PPCDoubleDouble())
313
34
    return llvm::Type::getPPC_FP128Ty(VMContext);
314
273
  if (&format == &llvm::APFloat::x87DoubleExtended())
315
273
    return llvm::Type::getX86_FP80Ty(VMContext);
316
0
  llvm_unreachable("Unknown float format!");
317
0
}
318
319
52.2k
llvm::Type *CodeGenTypes::ConvertFunctionTypeInternal(QualType QFT) {
320
52.2k
  assert(QFT.isCanonical());
321
52.2k
  const Type *Ty = QFT.getTypePtr();
322
52.2k
  const FunctionType *FT = cast<FunctionType>(QFT.getTypePtr());
323
  // First, check whether we can build the full function type.  If the
324
  // function type depends on an incomplete type (e.g. a struct or enum), we
325
  // cannot lower the function type.
326
52.2k
  if (!isFuncTypeConvertible(FT)) {
327
    // This function's type depends on an incomplete tag type.
328
19
329
    // Force conversion of all the relevant record types, to make sure
330
    // we re-convert the FunctionType when appropriate.
331
19
    if (const RecordType *RT = FT->getReturnType()->getAs<RecordType>())
332
8
      ConvertRecordDeclType(RT->getDecl());
333
19
    if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
334
31
      
for (unsigned i = 0, e = FPT->getNumParams(); 17
i != e;
i++14
)
335
14
        if (const RecordType *RT = FPT->getParamType(i)->getAs<RecordType>())
336
12
          ConvertRecordDeclType(RT->getDecl());
337
19
338
19
    SkippedLayout = true;
339
19
340
    // Return a placeholder type.
341
19
    return llvm::StructType::get(getLLVMContext());
342
19
  }
343
52.2k
344
  // While we're converting the parameter types for a function, we don't want
345
  // to recursively convert any pointed-to structs.  Converting directly-used
346
  // structs is ok though.
347
52.2k
  if (!RecordsBeingLaidOut.insert(Ty).second) {
348
1
    SkippedLayout = true;
349
1
    return llvm::StructType::get(getLLVMContext());
350
1
  }
351
52.2k
352
  // The function type can be built; call the appropriate routines to
353
  // build it.
354
52.2k
  const CGFunctionInfo *FI;
355
52.2k
  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
356
51.6k
    FI = &arrangeFreeFunctionType(
357
51.6k
        CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)));
358
582
  } else {
359
582
    const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT);
360
582
    FI = &arrangeFreeFunctionType(
361
582
        CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)));
362
582
  }
363
52.2k
364
52.2k
  llvm::Type *ResultType = nullptr;
365
  // If there is something higher level prodding our CGFunctionInfo, then
366
  // don't recurse into it again.
367
52.2k
  if (FunctionsBeingProcessed.count(FI)) {
368
2
369
2
    ResultType = llvm::StructType::get(getLLVMContext());
370
2
    SkippedLayout = true;
371
52.2k
  } else {
372
52.2k
373
    // Otherwise, we're good to go, go ahead and convert it.
374
52.2k
    ResultType = GetFunctionType(*FI);
375
52.2k
  }
376
52.2k
377
52.2k
  RecordsBeingLaidOut.erase(Ty);
378
52.2k
379
52.2k
  if (SkippedLayout)
380
27
    TypeCache.clear();
381
52.2k
382
52.2k
  if (RecordsBeingLaidOut.empty())
383
48.8k
    
while (48.7k
!DeferredRecords.empty())
384
98
      ConvertRecordDeclType(DeferredRecords.pop_back_val());
385
52.2k
  return ResultType;
386
52.2k
}
387
388
/// ConvertType - Convert the specified type to its LLVM form.
389
5.64M
llvm::Type *CodeGenTypes::ConvertType(QualType T) {
390
5.64M
  T = Context.getCanonicalType(T);
391
5.64M
392
5.64M
  const Type *Ty = T.getTypePtr();
393
5.64M
394
  // For the device-side compilation, CUDA device builtin surface/texture types
395
  // may be represented in different types.
396
5.64M
  if (Context.getLangOpts().CUDAIsDevice) {
397
7.21k
    if (T->isCUDADeviceBuiltinSurfaceType()) {
398
5
      if (auto *Ty = CGM.getTargetCodeGenInfo()
399
5
                         .getCUDADeviceBuiltinSurfaceDeviceType())
400
5
        return Ty;
401
7.20k
    } else if (T->isCUDADeviceBuiltinTextureType()) {
402
10
      if (auto *Ty = CGM.getTargetCodeGenInfo()
403
10
                         .getCUDADeviceBuiltinTextureDeviceType())
404
10
        return Ty;
405
5.64M
    }
406
7.21k
  }
407
5.64M
408
  // RecordTypes are cached and processed specially.
409
5.64M
  if (const RecordType *RT = dyn_cast<RecordType>(Ty))
410
336k
    return ConvertRecordDeclType(RT->getDecl());
411
5.30M
412
  // See if type is already cached.
413
5.30M
  llvm::DenseMap<const Type *, llvm::Type *>::iterator TCI = TypeCache.find(Ty);
414
  // If type is found in map then use it. Otherwise, convert type T.
415
5.30M
  if (TCI != TypeCache.end())
416
4.59M
    return TCI->second;
417
717k
418
  // If we don't have it in the cache, convert it now.
419
717k
  llvm::Type *ResultType = nullptr;
420
717k
  switch (Ty->getTypeClass()) {
421
0
  case Type::Record: // Handled above.
422
0
#define TYPE(Class, Base)
423
0
#define ABSTRACT_TYPE(Class, Base)
424
0
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
425
0
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
426
0
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
427
0
#include "clang/AST/TypeNodes.inc"
428
0
    llvm_unreachable("Non-canonical or dependent types aren't possible.");
429
0
430
372k
  case Type::Builtin: {
431
372k
    switch (cast<BuiltinType>(Ty)->getKind()) {
432
56.2k
    case BuiltinType::Void:
433
56.2k
    case BuiltinType::ObjCId:
434
56.2k
    case BuiltinType::ObjCClass:
435
56.2k
    case BuiltinType::ObjCSel:
436
      // LLVM void type can only be used as the result of a function call.  Just
437
      // map to the same as char.
438
56.2k
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
439
56.2k
      break;
440
56.2k
441
3.63k
    case BuiltinType::Bool:
442
      // Note that we always return bool as i1 for use as a scalar type.
443
3.63k
      ResultType = llvm::Type::getInt1Ty(getLLVMContext());
444
3.63k
      break;
445
56.2k
446
84.9k
    case BuiltinType::Char_S:
447
84.9k
    case BuiltinType::Char_U:
448
84.9k
    case BuiltinType::SChar:
449
84.9k
    case BuiltinType::UChar:
450
84.9k
    case BuiltinType::Short:
451
84.9k
    case BuiltinType::UShort:
452
84.9k
    case BuiltinType::Int:
453
84.9k
    case BuiltinType::UInt:
454
84.9k
    case BuiltinType::Long:
455
84.9k
    case BuiltinType::ULong:
456
84.9k
    case BuiltinType::LongLong:
457
84.9k
    case BuiltinType::ULongLong:
458
84.9k
    case BuiltinType::WChar_S:
459
84.9k
    case BuiltinType::WChar_U:
460
84.9k
    case BuiltinType::Char8:
461
84.9k
    case BuiltinType::Char16:
462
84.9k
    case BuiltinType::Char32:
463
84.9k
    case BuiltinType::ShortAccum:
464
84.9k
    case BuiltinType::Accum:
465
84.9k
    case BuiltinType::LongAccum:
466
84.9k
    case BuiltinType::UShortAccum:
467
84.9k
    case BuiltinType::UAccum:
468
84.9k
    case BuiltinType::ULongAccum:
469
84.9k
    case BuiltinType::ShortFract:
470
84.9k
    case BuiltinType::Fract:
471
84.9k
    case BuiltinType::LongFract:
472
84.9k
    case BuiltinType::UShortFract:
473
84.9k
    case BuiltinType::UFract:
474
84.9k
    case BuiltinType::ULongFract:
475
84.9k
    case BuiltinType::SatShortAccum:
476
84.9k
    case BuiltinType::SatAccum:
477
84.9k
    case BuiltinType::SatLongAccum:
478
84.9k
    case BuiltinType::SatUShortAccum:
479
84.9k
    case BuiltinType::SatUAccum:
480
84.9k
    case BuiltinType::SatULongAccum:
481
84.9k
    case BuiltinType::SatShortFract:
482
84.9k
    case BuiltinType::SatFract:
483
84.9k
    case BuiltinType::SatLongFract:
484
84.9k
    case BuiltinType::SatUShortFract:
485
84.9k
    case BuiltinType::SatUFract:
486
84.9k
    case BuiltinType::SatULongFract:
487
84.9k
      ResultType = llvm::IntegerType::get(getLLVMContext(),
488
84.9k
                                 static_cast<unsigned>(Context.getTypeSize(T)));
489
84.9k
      break;
490
84.9k
491
14
    case BuiltinType::Float16:
492
14
      ResultType =
493
14
          getTypeForFormat(getLLVMContext(), Context.getFloatTypeSemantics(T),
494
14
                           /* UseNativeHalf = */ true);
495
14
      break;
496
84.9k
497
538
    case BuiltinType::Half:
498
      // Half FP can either be storage-only (lowered to i16) or native.
499
538
      ResultType = getTypeForFormat(
500
538
          getLLVMContext(), Context.getFloatTypeSemantics(T),
501
538
          Context.getLangOpts().NativeHalfType ||
502
507
              !Context.getTargetInfo().useFP16ConversionIntrinsics());
503
538
      break;
504
9.42k
    case BuiltinType::BFloat16:
505
9.42k
    case BuiltinType::Float:
506
9.42k
    case BuiltinType::Double:
507
9.42k
    case BuiltinType::LongDouble:
508
9.42k
    case BuiltinType::Float128:
509
9.42k
      ResultType = getTypeForFormat(getLLVMContext(),
510
9.42k
                                    Context.getFloatTypeSemantics(T),
511
9.42k
                                    /* UseNativeHalf = */ false);
512
9.42k
      break;
513
9.42k
514
531
    case BuiltinType::NullPtr:
515
      // Model std::nullptr_t as i8*
516
531
      ResultType = llvm::Type::getInt8PtrTy(getLLVMContext());
517
531
      break;
518
9.42k
519
162
    case BuiltinType::UInt128:
520
162
    case BuiltinType::Int128:
521
162
      ResultType = llvm::IntegerType::get(getLLVMContext(), 128);
522
162
      break;
523
162
524
162
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
525
2.70k
    case BuiltinType::Id:
526
2.70k
#include 
"clang/Basic/OpenCLImageTypes.def"162
527
2.70k
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
528
900
    case BuiltinType::Id:
529
900
#include 
"clang/Basic/OpenCLExtensionTypes.def"75
530
75
    case BuiltinType::OCLSampler:
531
75
    case BuiltinType::OCLEvent:
532
75
    case BuiltinType::OCLClkEvent:
533
75
    case BuiltinType::OCLQueue:
534
75
    case BuiltinType::OCLReserveID:
535
75
      ResultType = CGM.getOpenCLRuntime().convertOpenCLSpecificType(Ty);
536
75
      break;
537
217k
    case BuiltinType::SveInt8:
538
217k
    case BuiltinType::SveUint8:
539
217k
    case BuiltinType::SveInt8x2:
540
217k
    case BuiltinType::SveUint8x2:
541
217k
    case BuiltinType::SveInt8x3:
542
217k
    case BuiltinType::SveUint8x3:
543
217k
    case BuiltinType::SveInt8x4:
544
217k
    case BuiltinType::SveUint8x4:
545
217k
    case BuiltinType::SveInt16:
546
217k
    case BuiltinType::SveUint16:
547
217k
    case BuiltinType::SveInt16x2:
548
217k
    case BuiltinType::SveUint16x2:
549
217k
    case BuiltinType::SveInt16x3:
550
217k
    case BuiltinType::SveUint16x3:
551
217k
    case BuiltinType::SveInt16x4:
552
217k
    case BuiltinType::SveUint16x4:
553
217k
    case BuiltinType::SveInt32:
554
217k
    case BuiltinType::SveUint32:
555
217k
    case BuiltinType::SveInt32x2:
556
217k
    case BuiltinType::SveUint32x2:
557
217k
    case BuiltinType::SveInt32x3:
558
217k
    case BuiltinType::SveUint32x3:
559
217k
    case BuiltinType::SveInt32x4:
560
217k
    case BuiltinType::SveUint32x4:
561
217k
    case BuiltinType::SveInt64:
562
217k
    case BuiltinType::SveUint64:
563
217k
    case BuiltinType::SveInt64x2:
564
217k
    case BuiltinType::SveUint64x2:
565
217k
    case BuiltinType::SveInt64x3:
566
217k
    case BuiltinType::SveUint64x3:
567
217k
    case BuiltinType::SveInt64x4:
568
217k
    case BuiltinType::SveUint64x4:
569
217k
    case BuiltinType::SveBool:
570
217k
    case BuiltinType::SveFloat16:
571
217k
    case BuiltinType::SveFloat16x2:
572
217k
    case BuiltinType::SveFloat16x3:
573
217k
    case BuiltinType::SveFloat16x4:
574
217k
    case BuiltinType::SveFloat32:
575
217k
    case BuiltinType::SveFloat32x2:
576
217k
    case BuiltinType::SveFloat32x3:
577
217k
    case BuiltinType::SveFloat32x4:
578
217k
    case BuiltinType::SveFloat64:
579
217k
    case BuiltinType::SveFloat64x2:
580
217k
    case BuiltinType::SveFloat64x3:
581
217k
    case BuiltinType::SveFloat64x4:
582
217k
    case BuiltinType::SveBFloat16:
583
217k
    case BuiltinType::SveBFloat16x2:
584
217k
    case BuiltinType::SveBFloat16x3:
585
217k
    case BuiltinType::SveBFloat16x4: {
586
217k
      ASTContext::BuiltinVectorTypeInfo Info =
587
217k
          Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
588
217k
      return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
589
217k
                                           Info.EC.getKnownMinValue() *
590
217k
                                               Info.NumVectors);
591
217k
    }
592
0
    case BuiltinType::Dependent:
593
0
#define BUILTIN_TYPE(Id, SingletonId)
594
0
#define PLACEHOLDER_TYPE(Id, SingletonId) \
595
0
    case BuiltinType::Id:
596
0
#include "clang/AST/BuiltinTypes.def"
597
0
      llvm_unreachable("Unexpected placeholder builtin type!");
598
155k
    }
599
155k
    break;
600
155k
  }
601
0
  case Type::Auto:
602
0
  case Type::DeducedTemplateSpecialization:
603
0
    llvm_unreachable("Unexpected undeduced type!");
604
552
  case Type::Complex: {
605
552
    llvm::Type *EltTy = ConvertType(cast<ComplexType>(Ty)->getElementType());
606
552
    ResultType = llvm::StructType::get(EltTy, EltTy);
607
552
    break;
608
0
  }
609
31.3k
  case Type::LValueReference:
610
31.3k
  case Type::RValueReference: {
611
31.3k
    const ReferenceType *RTy = cast<ReferenceType>(Ty);
612
31.3k
    QualType ETy = RTy->getPointeeType();
613
31.3k
    llvm::Type *PointeeType = ConvertTypeForMem(ETy);
614
31.3k
    unsigned AS = Context.getTargetAddressSpace(ETy);
615
31.3k
    ResultType = llvm::PointerType::get(PointeeType, AS);
616
31.3k
    break;
617
31.3k
  }
618
137k
  case Type::Pointer: {
619
137k
    const PointerType *PTy = cast<PointerType>(Ty);
620
137k
    QualType ETy = PTy->getPointeeType();
621
137k
    llvm::Type *PointeeType = ConvertTypeForMem(ETy);
622
137k
    if (PointeeType->isVoidTy())
623
0
      PointeeType = llvm::Type::getInt8Ty(getLLVMContext());
624
137k
625
137k
    unsigned AS = PointeeType->isFunctionTy()
626
5.94k
                      ? getDataLayout().getProgramAddressSpace()
627
131k
                      : Context.getTargetAddressSpace(ETy);
628
137k
629
137k
    ResultType = llvm::PointerType::get(PointeeType, AS);
630
137k
    break;
631
31.3k
  }
632
31.3k
633
2.92k
  case Type::VariableArray: {
634
2.92k
    const VariableArrayType *A = cast<VariableArrayType>(Ty);
635
2.92k
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
636
2.92k
           "FIXME: We only handle trivial array types so far!");
637
    // VLAs resolve to the innermost element type; this matches
638
    // the return of alloca, and there isn't any obviously better choice.
639
2.92k
    ResultType = ConvertTypeForMem(A->getElementType());
640
2.92k
    break;
641
31.3k
  }
642
211
  case Type::IncompleteArray: {
643
211
    const IncompleteArrayType *A = cast<IncompleteArrayType>(Ty);
644
211
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
645
211
           "FIXME: We only handle trivial array types so far!");
646
    // int X[] -> [0 x int], unless the element type is not sized.  If it is
647
    // unsized (e.g. an incomplete struct) just use [0 x i8].
648
211
    ResultType = ConvertTypeForMem(A->getElementType());
649
211
    if (!ResultType->isSized()) {
650
1
      SkippedLayout = true;
651
1
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
652
1
    }
653
211
    ResultType = llvm::ArrayType::get(ResultType, 0);
654
211
    break;
655
31.3k
  }
656
47.5k
  case Type::ConstantArray: {
657
47.5k
    const ConstantArrayType *A = cast<ConstantArrayType>(Ty);
658
47.5k
    llvm::Type *EltTy = ConvertTypeForMem(A->getElementType());
659
47.5k
660
    // Lower arrays of undefined struct type to arrays of i8 just to have a
661
    // concrete type.
662
47.5k
    if (!EltTy->isSized()) {
663
3
      SkippedLayout = true;
664
3
      EltTy = llvm::Type::getInt8Ty(getLLVMContext());
665
3
    }
666
47.5k
667
47.5k
    ResultType = llvm::ArrayType::get(EltTy, A->getSize().getZExtValue());
668
47.5k
    break;
669
31.3k
  }
670
5.23k
  case Type::ExtVector:
671
5.23k
  case Type::Vector: {
672
5.23k
    const VectorType *VT = cast<VectorType>(Ty);
673
5.23k
    ResultType = llvm::FixedVectorType::get(ConvertType(VT->getElementType()),
674
5.23k
                                            VT->getNumElements());
675
5.23k
    break;
676
5.23k
  }
677
39
  case Type::ConstantMatrix: {
678
39
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
679
39
    ResultType =
680
39
        llvm::FixedVectorType::get(ConvertType(MT->getElementType()),
681
39
                                   MT->getNumRows() * MT->getNumColumns());
682
39
    break;
683
5.23k
  }
684
52.2k
  case Type::FunctionNoProto:
685
52.2k
  case Type::FunctionProto:
686
52.2k
    ResultType = ConvertFunctionTypeInternal(T);
687
52.2k
    break;
688
25.6k
  case Type::ObjCObject:
689
25.6k
    ResultType = ConvertType(cast<ObjCObjectType>(Ty)->getBaseType());
690
25.6k
    break;
691
52.2k
692
3.81k
  case Type::ObjCInterface: {
693
    // Objective-C interfaces are always opaque (outside of the
694
    // runtime, which can do whatever it likes); we never refine
695
    // these.
696
3.81k
    llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(Ty)];
697
3.81k
    if (!T)
698
3.81k
      T = llvm::StructType::create(getLLVMContext());
699
3.81k
    ResultType = T;
700
3.81k
    break;
701
52.2k
  }
702
52.2k
703
29.3k
  case Type::ObjCObjectPointer: {
704
    // Protocol qualifications do not influence the LLVM type, we just return a
705
    // pointer to the underlying interface type. We don't need to worry about
706
    // recursive conversion.
707
29.3k
    llvm::Type *T =
708
29.3k
      ConvertTypeForMem(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
709
29.3k
    ResultType = T->getPointerTo();
710
29.3k
    break;
711
52.2k
  }
712
52.2k
713
6.81k
  case Type::Enum: {
714
6.81k
    const EnumDecl *ED = cast<EnumType>(Ty)->getDecl();
715
6.81k
    if (ED->isCompleteDefinition() || 
ED->isFixed()67
)
716
6.80k
      return ConvertType(ED->getIntegerType());
717
    // Return a placeholder 'i32' type.  This can be changed later when the
718
    // type is defined (see UpdateCompletedType), but is likely to be the
719
    // "right" answer.
720
10
    ResultType = llvm::Type::getInt32Ty(getLLVMContext());
721
10
    break;
722
10
  }
723
10
724
596
  case Type::BlockPointer: {
725
596
    const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType();
726
596
    llvm::Type *PointeeType = CGM.getLangOpts().OpenCL
727
45
                                  ? CGM.getGenericBlockLiteralType()
728
551
                                  : ConvertTypeForMem(FTy);
729
596
    unsigned AS = Context.getTargetAddressSpace(FTy);
730
596
    ResultType = llvm::PointerType::get(PointeeType, AS);
731
596
    break;
732
10
  }
733
10
734
825
  case Type::MemberPointer: {
735
825
    auto *MPTy = cast<MemberPointerType>(Ty);
736
825
    if (!getCXXABI().isMemberPointerConvertible(MPTy)) {
737
21
      RecordsWithOpaqueMemberPointers.insert(MPTy->getClass());
738
21
      ResultType = llvm::StructType::create(getLLVMContext());
739
804
    } else {
740
804
      ResultType = getCXXABI().ConvertMemberPointerType(MPTy);
741
804
    }
742
825
    break;
743
10
  }
744
10
745
325
  case Type::Atomic: {
746
325
    QualType valueType = cast<AtomicType>(Ty)->getValueType();
747
325
    ResultType = ConvertTypeForMem(valueType);
748
325
749
    // Pad out to the inflated size if necessary.
750
325
    uint64_t valueSize = Context.getTypeSize(valueType);
751
325
    uint64_t atomicSize = Context.getTypeSize(Ty);
752
325
    if (valueSize != atomicSize) {
753
10
      assert(valueSize < atomicSize);
754
10
      llvm::Type *elts[] = {
755
10
        ResultType,
756
10
        llvm::ArrayType::get(CGM.Int8Ty, (atomicSize - valueSize) / 8)
757
10
      };
758
10
      ResultType = llvm::StructType::get(getLLVMContext(),
759
10
                                         llvm::makeArrayRef(elts));
760
10
    }
761
325
    break;
762
10
  }
763
42
  case Type::Pipe: {
764
42
    ResultType = CGM.getOpenCLRuntime().getPipeType(cast<PipeType>(Ty));
765
42
    break;
766
10
  }
767
391
  case Type::ExtInt: {
768
391
    const auto &EIT = cast<ExtIntType>(Ty);
769
391
    ResultType = llvm::Type::getIntNTy(getLLVMContext(), EIT->getNumBits());
770
391
    break;
771
494k
  }
772
494k
  }
773
494k
774
494k
  assert(ResultType && "Didn't convert a type?");
775
494k
776
494k
  TypeCache[Ty] = ResultType;
777
494k
  return ResultType;
778
494k
}
779
780
32
/// Returns true if the given atomic type needs tail padding, i.e. its
/// storage size is larger than the size of its underlying value type.
bool CodeGenModule::isPaddedAtomicType(QualType type) {
  // castAs<> asserts that 'type' really is an atomic type; delegate the
  // actual size comparison to the AtomicType* overload.
  const AtomicType *AT = type->castAs<AtomicType>();
  return isPaddedAtomicType(AT);
}
783
784
32
bool CodeGenModule::isPaddedAtomicType(const AtomicType *type) {
785
32
  return Context.getTypeSize(type) != Context.getTypeSize(type->getValueType());
786
32
}
787
788
/// ConvertRecordDeclType - Lay out a tagged decl type like struct or union.
///
/// Returns the (possibly still-opaque) LLVM struct for \p RD. Creates a
/// forward declaration on first sight, and fills in the body only when the
/// definition is available and doing so cannot recurse infinitely.
llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
  // TagDecl's are not necessarily unique, instead use the (clang)
  // type connected to the decl.
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();

  // Note: operator[] default-constructs a null entry on first lookup.
  llvm::StructType *&Entry = RecordDeclTypes[Key];

  // If we don't have a StructType at all yet, create the forward declaration.
  if (!Entry) {
    Entry = llvm::StructType::create(getLLVMContext());
    addRecordTypeName(RD, Entry, "");
  }
  llvm::StructType *Ty = Entry;

  // If this is still a forward declaration, or the LLVM type is already
  // complete, there's nothing more to do.
  RD = RD->getDefinition();
  if (!RD || !RD->isCompleteDefinition() || !Ty->isOpaque())
    return Ty;

  // If converting this type would cause us to infinitely loop, don't do it!
  // Defer it instead; deferred records are retried once the outermost record
  // finishes below.
  if (!isSafeToConvert(RD, *this)) {
    DeferredRecords.push_back(RD);
    return Ty;
  }

  // Okay, this is a definition of a type.  Compile the implementation now.
  // Track in-flight records so recursive conversion attempts can be detected.
  bool InsertResult = RecordsBeingLaidOut.insert(Key).second;
  (void)InsertResult;
  assert(InsertResult && "Recursively compiling a struct?");

  // Force conversion of non-virtual base classes recursively.
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CRD->bases()) {
      if (I.isVirtual()) continue;
      ConvertRecordDeclType(I.getType()->castAs<RecordType>()->getDecl());
    }
  }

  // Layout fields.  ComputeRecordLayout also sets the body of Ty.
  std::unique_ptr<CGRecordLayout> Layout = ComputeRecordLayout(RD, Ty);
  CGRecordLayouts[Key] = std::move(Layout);

  // We're done laying out this struct.
  bool EraseResult = RecordsBeingLaidOut.erase(Key); (void)EraseResult;
  assert(EraseResult && "struct not in RecordsBeingLaidOut set?");

  // If this struct blocked a FunctionType conversion, then recompute whatever
  // was derived from that.
  // FIXME: This is hugely overconservative.
  if (SkippedLayout)
    TypeCache.clear();

  // If we're done converting the outer-most record, then convert any deferred
  // structs as well.  pop_back_val() drains the worklist; conversions may
  // push new deferred records, hence the loop.
  if (RecordsBeingLaidOut.empty())
    while (!DeferredRecords.empty())
      ConvertRecordDeclType(DeferredRecords.pop_back_val());

  return Ty;
}
850
851
/// getCGRecordLayout - Return record layout info for the given record decl.
852
const CGRecordLayout &
853
217k
CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
854
217k
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();
855
217k
856
217k
  auto I = CGRecordLayouts.find(Key);
857
217k
  if (I != CGRecordLayouts.end())
858
210k
    return *I->second;
859
  // Compute the type information.
860
6.71k
  ConvertRecordDeclType(RD);
861
6.71k
862
  // Now try again.
863
6.71k
  I = CGRecordLayouts.find(Key);
864
6.71k
865
6.71k
  assert(I != CGRecordLayouts.end() &&
866
6.71k
         "Unable to find record layout information for type");
867
6.71k
  return *I->second;
868
6.71k
}
869
870
15
/// Check whether a pointer (or block-pointer) type can be zero-initialized.
/// Callers must only pass pointer-like types.
bool CodeGenTypes::isPointerZeroInitializable(QualType T) {
  // Guard against misuse: this entry point is only valid for pointers.
  bool IsPointerLike = T->isAnyPointerType() || T->isBlockPointerType();
  (void)IsPointerLike;
  assert(IsPointerLike && "Invalid type");
  return isZeroInitializable(T);
}
874
875
208k
/// Determine whether memory for a value of type \p T may be initialized by
/// simply filling it with zero bytes.
bool CodeGenTypes::isZeroInitializable(QualType T) {
  // Pointers are zero-initializable only when the target's null pointer
  // representation is actually all-zero bits.
  if (T->getAs<PointerType>())
    return Context.getTargetNullPointerValue(T) == 0;

  // For arrays, the answer is determined by the element type.
  if (const auto *ArrTy = Context.getAsArrayType(T)) {
    // An incomplete array has no elements that could need initialization.
    if (isa<IncompleteArrayType>(ArrTy))
      return true;
    // Likewise for a constant array with zero elements.
    if (const auto *ConstArrTy = dyn_cast<ConstantArrayType>(ArrTy))
      if (Context.getConstantArrayElementCount(ConstArrTy) == 0)
        return true;
    // Otherwise, strip the array layers and examine the element type below.
    T = Context.getBaseElementType(T);
  }

  // Records are non-zero-initializable if they contain any
  // non-zero-initializable subobjects.
  if (const auto *RecTy = T->getAs<RecordType>())
    return isZeroInitializable(RecTy->getDecl());

  // We have to ask the ABI about member pointers.
  if (const auto *MemPtrTy = T->getAs<MemberPointerType>())
    return getCXXABI().isZeroInitializable(MemPtrTy);

  // Everything else is okay.
  return true;
}
902
903
31.0k
bool CodeGenTypes::isZeroInitializable(const RecordDecl *RD) {
904
31.0k
  return getCGRecordLayout(RD).isZeroInitializable();
905
31.0k
}