/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp
Line | Count | Source
1 | | //===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | // |
9 | | // This is the code that handles AST -> LLVM type lowering. |
10 | | // |
11 | | //===----------------------------------------------------------------------===// |
12 | | |
13 | | #include "CodeGenTypes.h" |
14 | | #include "CGCXXABI.h" |
15 | | #include "CGCall.h" |
16 | | #include "CGOpenCLRuntime.h" |
17 | | #include "CGRecordLayout.h" |
18 | | #include "TargetInfo.h" |
19 | | #include "clang/AST/ASTContext.h" |
20 | | #include "clang/AST/DeclCXX.h" |
21 | | #include "clang/AST/DeclObjC.h" |
22 | | #include "clang/AST/Expr.h" |
23 | | #include "clang/AST/RecordLayout.h" |
24 | | #include "clang/CodeGen/CGFunctionInfo.h" |
25 | | #include "llvm/IR/DataLayout.h" |
26 | | #include "llvm/IR/DerivedTypes.h" |
27 | | #include "llvm/IR/Module.h" |
28 | | |
29 | | using namespace clang; |
30 | | using namespace CodeGen; |
31 | | |
32 | | CodeGenTypes::CodeGenTypes(CodeGenModule &cgm) |
33 | | : CGM(cgm), Context(cgm.getContext()), TheModule(cgm.getModule()), |
34 | | Target(cgm.getTarget()), TheCXXABI(cgm.getCXXABI()), |
35 | 36.4k | TheABIInfo(cgm.getTargetCodeGenInfo().getABIInfo()) { |
36 | 36.4k | SkippedLayout = false; |
37 | 36.4k | } |
38 | | |
39 | 36.3k | CodeGenTypes::~CodeGenTypes() { |
40 | 36.3k | for (llvm::FoldingSet<CGFunctionInfo>::iterator |
41 | 273k | I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; ) |
42 | 237k | delete &*I++; |
43 | 36.3k | } |
44 | | |
45 | 21.3k | const CodeGenOptions &CodeGenTypes::getCodeGenOpts() const { |
46 | 21.3k | return CGM.getCodeGenOpts(); |
47 | 21.3k | } |
48 | | |
49 | | void CodeGenTypes::addRecordTypeName(const RecordDecl *RD, |
50 | | llvm::StructType *Ty, |
51 | 147k | StringRef suffix) { |
52 | 147k | SmallString<256> TypeName; |
53 | 147k | llvm::raw_svector_ostream OS(TypeName); |
54 | 147k | OS << RD->getKindName() << '.'; |
55 | | |
56 | | // FIXME: We probably want to make more tweaks to the printing policy. For |
57 | | // example, we should probably enable PrintCanonicalTypes and |
58 | | // FullyQualifiedNames. |
59 | 147k | PrintingPolicy Policy = RD->getASTContext().getPrintingPolicy(); |
60 | 147k | Policy.SuppressInlineNamespace = false; |
61 | | |
62 | | // Name the codegen type after the typedef name |
63 | | // if there is no tag type name available |
64 | 147k | if (RD->getIdentifier()) { |
65 | | // FIXME: We should not have to check for a null decl context here. |
66 | | // Right now we do it because the implicit Obj-C decls don't have one. |
67 | 139k | if (RD->getDeclContext()) |
68 | 139k | RD->printQualifiedName(OS, Policy); |
69 | 0 | else |
70 | 0 | RD->printName(OS); |
71 | 139k | } else if (const TypedefNameDecl *TDD = RD->getTypedefNameForAnonDecl()) {
72 | | // FIXME: We should not have to check for a null decl context here. |
73 | | // Right now we do it because the implicit Obj-C decls don't have one. |
74 | 1.84k | if (TDD->getDeclContext()) |
75 | 1.84k | TDD->printQualifiedName(OS, Policy); |
76 | 0 | else |
77 | 0 | TDD->printName(OS); |
78 | 1.84k | } else |
79 | 5.52k | OS << "anon"; |
80 | | |
81 | 147k | if (!suffix.empty()) |
82 | 10.4k | OS << suffix; |
83 | | |
84 | 147k | Ty->setName(OS.str()); |
85 | 147k | } |
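 | |
 | | // Illustrative sketch (not part of this file): the names produced above
 | | // follow the pattern "<kind>.<qualified-name>[<suffix>]", e.g.
 | | //
 | | //   namespace ns { struct Widget { int x; }; }  -> %"struct.ns::Widget"
 | | //   typedef struct { int x; } Point;            -> %struct.Point
 | | //   struct { int x; } v;                        -> %struct.anon
 | | //
 | | // A non-empty suffix (".base", for instance) is used to distinguish a
 | | // C++ class's base-subobject layout from its complete-object layout.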
86 | | |
87 | | /// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from |
88 | | /// ConvertType in that it is used to convert to the memory representation for |
89 | | /// a type. For example, the scalar representation for _Bool is i1, but the |
90 | | /// memory representation is usually i8 or i32, depending on the target. |
91 | 4.29M | llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T, bool ForBitField) { |
92 | 4.29M | if (T->isConstantMatrixType()) { |
93 | 1.07k | const Type *Ty = Context.getCanonicalType(T).getTypePtr(); |
94 | 1.07k | const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty); |
95 | 1.07k | return llvm::ArrayType::get(ConvertType(MT->getElementType()), |
96 | 1.07k | MT->getNumRows() * MT->getNumColumns()); |
97 | 1.07k | } |
98 | | |
99 | 4.29M | llvm::Type *R = ConvertType(T); |
100 | | |
101 | | // Check for the boolean vector case. |
102 | 4.29M | if (T->isExtVectorBoolType()) { |
103 | 42 | auto *FixedVT = cast<llvm::FixedVectorType>(R); |
104 | | // Pad to at least one byte. |
105 | 42 | uint64_t BytePadded = std::max<uint64_t>(FixedVT->getNumElements(), 8); |
106 | 42 | return llvm::IntegerType::get(FixedVT->getContext(), BytePadded); |
107 | 42 | } |
108 | | |
109 | | // If this is a bool type, or a bit-precise integer type in a bitfield |
110 | | // representation, map this integer to the target-specified size. |
111 | 4.29M | if ((ForBitField && T->isBitIntType()) ||
112 | 4.29M | (!T->isBitIntType() && R->isIntegerTy(1)))
113 | 26.0k | return llvm::IntegerType::get(getLLVMContext(), |
114 | 26.0k | (unsigned)Context.getTypeSize(T)); |
115 | | |
116 | | // Else, don't map it. |
117 | 4.26M | return R; |
118 | 4.29M | } |
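 | |
 | | // Illustrative sketch (assuming a typical x86-64 target) of how the
 | | // scalar and memory forms handled above can differ:
 | |
 | | //   _Bool                          i1 (scalar)   vs.  i8 (memory)
 | | //   4-element ext_vector of _Bool  <4 x i1>      vs.  i8 (bit-packed)
 | | //   2x2 matrix of float            <4 x float>   vs.  [4 x float]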
119 | | |
120 | | /// isRecordLayoutComplete - Return true if the specified type is already |
121 | | /// completely laid out. |
122 | 647k | bool CodeGenTypes::isRecordLayoutComplete(const Type *Ty) const { |
123 | 647k | llvm::DenseMap<const Type*, llvm::StructType *>::const_iterator I = |
124 | 647k | RecordDeclTypes.find(Ty); |
125 | 647k | return I != RecordDeclTypes.end() && !I->second->isOpaque();
126 | 647k | } |
127 | | |
128 | | static bool |
129 | | isSafeToConvert(QualType T, CodeGenTypes &CGT, |
130 | | llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked); |
131 | | |
132 | | |
133 | | /// isSafeToConvert - Return true if it is safe to convert the specified record |
134 | | /// decl to IR and lay it out, false if doing so would cause us to get into a |
135 | | /// recursive compilation mess. |
136 | | static bool |
137 | | isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT, |
138 | 648k | llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) { |
139 | | // If we have already checked this type (maybe the same type is used by-value |
140 | | // multiple times in multiple structure fields), don't check again.
141 | 648k | if (!AlreadyChecked.insert(RD).second) |
142 | 958 | return true; |
143 | | |
144 | 647k | const Type *Key = CGT.getContext().getTagDeclType(RD).getTypePtr(); |
145 | | |
146 | | // If this type is already laid out, converting it is a noop. |
147 | 647k | if (CGT.isRecordLayoutComplete(Key)) return true;
148 | | |
149 | | // If this type is currently being laid out, we can't recursively compile it. |
150 | 644k | if (CGT.isRecordBeingLaidOut(Key)) |
151 | 1.04k | return false; |
152 | | |
153 | | // If this type would require laying out bases that are currently being laid |
154 | | // out, don't do it. This includes virtual base classes which get laid out |
155 | | // when a class is translated, even though they aren't embedded by-value into |
156 | | // the class. |
157 | 643k | if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) { |
158 | 641k | for (const auto &I : CRD->bases()) |
159 | 580k | if (!isSafeToConvert(I.getType()->castAs<RecordType>()->getDecl(), CGT, |
160 | 580k | AlreadyChecked)) |
161 | 43 | return false; |
162 | 641k | } |
163 | | |
164 | | // If this type would require laying out members that are currently being laid |
165 | | // out, don't do it. |
166 | 643k | for (const auto *I : RD->fields()) |
167 | 91.5k | if (!isSafeToConvert(I->getType(), CGT, AlreadyChecked)) |
168 | 3 | return false; |
169 | | |
170 | | // If there are no problems, let's do it.
171 | 643k | return true; |
172 | 643k | } |
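 | |
 | | // Illustrative sketch: given
 | | //
 | | //   struct Inner { int x; };
 | | //   struct Outer { struct Inner i; };   // embedded by value
 | | //
 | | // isSafeToConvert(Outer, ...) transitively requires Inner to be safe; if
 | | // Inner is currently mid-layout, Outer's conversion is deferred (it ends
 | | // up on DeferredRecords) rather than recursing into Inner's layout again.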
173 | | |
174 | | /// isSafeToConvert - Return true if it is safe to convert this field type, |
175 | | /// which requires the structure elements contained by-value to all be |
176 | | /// recursively safe to convert. |
177 | | static bool |
178 | | isSafeToConvert(QualType T, CodeGenTypes &CGT, |
179 | 99.4k | llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) { |
180 | | // Strip off atomic type sugar. |
181 | 99.4k | if (const auto *AT = T->getAs<AtomicType>()) |
182 | 627 | T = AT->getValueType(); |
183 | | |
184 | | // If this is a record, check it. |
185 | 99.4k | if (const auto *RT = T->getAs<RecordType>()) |
186 | 22.3k | return isSafeToConvert(RT->getDecl(), CGT, AlreadyChecked); |
187 | | |
188 | | // If this is an array, check the elements, which are embedded inline. |
189 | 77.1k | if (const auto *AT = CGT.getContext().getAsArrayType(T)) |
190 | 7.96k | return isSafeToConvert(AT->getElementType(), CGT, AlreadyChecked); |
191 | | |
192 | | // Otherwise, there is no concern about transforming this. We only care about |
193 | | // things that are contained by-value in a structure that can have another |
194 | | // structure as a member. |
195 | 69.1k | return true; |
196 | 77.1k | } |
197 | | |
198 | | |
199 | | /// isSafeToConvert - Return true if it is safe to convert the specified record |
200 | | /// decl to IR and lay it out, false if doing so would cause us to get into a |
201 | | /// recursive compilation mess. |
202 | 143k | static bool isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT) { |
203 | | // If no structs are being laid out, we can certainly do this one. |
204 | 143k | if (CGT.noRecordsBeingLaidOut()) return true;
205 | | |
206 | 45.5k | llvm::SmallPtrSet<const RecordDecl*, 16> AlreadyChecked; |
207 | 45.5k | return isSafeToConvert(RD, CGT, AlreadyChecked); |
208 | 143k | } |
209 | | |
210 | | /// isFuncParamTypeConvertible - Return true if the specified type in a |
211 | | /// function parameter or result position can be converted to an IR type at this |
212 | | /// point. This boils down to whether it is complete, as well as whether
213 | | /// we've temporarily deferred expanding the type because we're in a recursive |
214 | | /// context. |
215 | 174k | bool CodeGenTypes::isFuncParamTypeConvertible(QualType Ty) { |
216 | | // Some ABIs cannot have their member pointers represented in IR unless |
217 | | // certain circumstances have been reached. |
218 | 174k | if (const auto *MPT = Ty->getAs<MemberPointerType>()) |
219 | 57 | return getCXXABI().isMemberPointerConvertible(MPT); |
220 | | |
221 | | // If this isn't a tagged type, we can convert it! |
222 | 174k | const TagType *TT = Ty->getAs<TagType>(); |
223 | 174k | if (!TT) return true;
224 | | |
225 | | // Incomplete types cannot be converted. |
226 | 8.03k | if (TT->isIncompleteType()) |
227 | 87 | return false; |
228 | | |
229 | | // If this is an enum, then it is always safe to convert. |
230 | 7.95k | const RecordType *RT = dyn_cast<RecordType>(TT); |
231 | 7.95k | if (!RT) return true;
232 | | |
233 | | // Otherwise, we have to be careful. If it is a struct that we're in the |
234 | | // process of expanding, then we can't convert the function type. That's ok |
235 | | // though because we must be in a pointer context under the struct, so we can |
236 | | // just convert it to a dummy type. |
237 | | // |
238 | | // We decide this by checking whether ConvertRecordDeclType returns us an |
239 | | // opaque type for a struct that we know is defined. |
240 | 7.37k | return isSafeToConvert(RT->getDecl(), *this); |
241 | 7.95k | } |
242 | | |
243 | | |
244 | | /// Verify that a given function type is complete, i.e. that the return type
245 | | /// and all of the parameter types are complete. Also check whether we are in
246 | | /// an RS_StructPointer context, and if so whether any struct types have been
247 | | /// pended. If so, we don't want to ask the ABI lowering code to handle a type |
248 | | /// that cannot be converted to an IR type. |
249 | 70.5k | bool CodeGenTypes::isFuncTypeConvertible(const FunctionType *FT) { |
250 | 70.5k | if (!isFuncParamTypeConvertible(FT->getReturnType())) |
251 | 18 | return false; |
252 | | |
253 | 70.5k | if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) |
254 | 173k | for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
255 | 103k | if (!isFuncParamTypeConvertible(FPT->getParamType(i))) |
256 | 77 | return false; |
257 | | |
258 | 70.5k | return true; |
259 | 70.5k | } |
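 | |
 | | // Illustrative sketch: with only a forward declaration in scope,
 | | //
 | | //   struct S;
 | | //   typedef void FnTy(struct S);   // parameter type is incomplete
 | | //
 | | // isFuncTypeConvertible(FnTy) is false, and callers substitute an opaque
 | | // placeholder type until S is completed.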
260 | | |
261 | | /// UpdateCompletedType - When we find the full definition for a TagDecl, |
262 | | /// replace the 'opaque' type we previously made for it if applicable. |
263 | 2.38M | void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) { |
264 | | // If this is an enum being completed, then we flush all non-struct types from |
265 | | // the cache. This allows function types and other things that may be derived |
266 | | // from the enum to be recomputed. |
267 | 2.38M | if (const EnumDecl *ED = dyn_cast<EnumDecl>(TD)) { |
268 | | // Only flush the cache if we've actually already converted this type. |
269 | 450k | if (TypeCache.count(ED->getTypeForDecl())) { |
270 | | // Okay, we formed some types based on this. We speculated that the enum |
271 | | // would be lowered to i32, so we only need to flush the cache if this |
272 | | // didn't happen. |
273 | 1 | if (!ConvertType(ED->getIntegerType())->isIntegerTy(32)) |
274 | 0 | TypeCache.clear(); |
275 | 1 | } |
276 | | // If necessary, provide the full definition of a type only used with a |
277 | | // declaration so far. |
278 | 450k | if (CGDebugInfo *DI = CGM.getModuleDebugInfo()) |
279 | 442k | DI->completeType(ED); |
280 | 450k | return; |
281 | 450k | } |
282 | | |
283 | | // If we completed a RecordDecl that we previously used and converted to an |
284 | | // anonymous type, then go ahead and complete it now. |
285 | 1.93M | const RecordDecl *RD = cast<RecordDecl>(TD); |
286 | 1.93M | if (RD->isDependentType()) return;
287 | | |
288 | | // Only complete it if we converted it already. If we haven't converted it |
289 | | // yet, we'll just do it lazily. |
290 | 1.52M | if (RecordDeclTypes.count(Context.getTagDeclType(RD).getTypePtr())) |
291 | 704 | ConvertRecordDeclType(RD); |
292 | | |
293 | | // If necessary, provide the full definition of a type only used with a |
294 | | // declaration so far. |
295 | 1.52M | if (CGDebugInfo *DI = CGM.getModuleDebugInfo()) |
296 | 1.39M | DI->completeType(RD); |
297 | 1.52M | } |
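 | |
 | | // Illustrative sketch (using the GNU forward-enum extension):
 | | //
 | | //   enum E;                        // speculatively lowered to i32
 | | //   enum E { Big = 1LL << 40 };    // completed; underlying type is i64
 | | //
 | | // Because the completed enum is not i32, the speculation above fails and
 | | // TypeCache is cleared so anything derived from E is recomputed.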
298 | | |
299 | 476 | void CodeGenTypes::RefreshTypeCacheForClass(const CXXRecordDecl *RD) { |
300 | 476 | QualType T = Context.getRecordType(RD); |
301 | 476 | T = Context.getCanonicalType(T); |
302 | | |
303 | 476 | const Type *Ty = T.getTypePtr(); |
304 | 476 | if (RecordsWithOpaqueMemberPointers.count(Ty)) { |
305 | 8 | TypeCache.clear(); |
306 | 8 | RecordsWithOpaqueMemberPointers.clear(); |
307 | 8 | } |
308 | 476 | } |
309 | | |
310 | | static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext, |
311 | | const llvm::fltSemantics &format, |
312 | 11.5k | bool UseNativeHalf = false) { |
313 | 11.5k | if (&format == &llvm::APFloat::IEEEhalf()) { |
314 | 891 | if (UseNativeHalf) |
315 | 888 | return llvm::Type::getHalfTy(VMContext); |
316 | 3 | else |
317 | 3 | return llvm::Type::getInt16Ty(VMContext); |
318 | 891 | } |
319 | 10.7k | if (&format == &llvm::APFloat::BFloat()) |
320 | 311 | return llvm::Type::getBFloatTy(VMContext); |
321 | 10.3k | if (&format == &llvm::APFloat::IEEEsingle()) |
322 | 4.73k | return llvm::Type::getFloatTy(VMContext); |
323 | 5.65k | if (&format == &llvm::APFloat::IEEEdouble()) |
324 | 5.20k | return llvm::Type::getDoubleTy(VMContext); |
325 | 448 | if (&format == &llvm::APFloat::IEEEquad()) |
326 | 106 | return llvm::Type::getFP128Ty(VMContext); |
327 | 342 | if (&format == &llvm::APFloat::PPCDoubleDouble()) |
328 | 40 | return llvm::Type::getPPC_FP128Ty(VMContext); |
329 | 302 | if (&format == &llvm::APFloat::x87DoubleExtended()) |
330 | 302 | return llvm::Type::getX86_FP80Ty(VMContext); |
331 | 0 | llvm_unreachable("Unknown float format!"); |
332 | 0 | } |
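 | |
 | | // For reference, a sketch of the resulting mappings on a typical x86-64
 | | // target: float -> float, double -> double, long double -> x86_fp80,
 | | // __float128 -> fp128, __bf16 -> bfloat, and _Float16 -> half (or i16
 | | // when half is storage-only).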
333 | | |
334 | 63.2k | llvm::Type *CodeGenTypes::ConvertFunctionTypeInternal(QualType QFT) { |
335 | 63.2k | assert(QFT.isCanonical()); |
336 | 0 | const Type *Ty = QFT.getTypePtr(); |
337 | 63.2k | const FunctionType *FT = cast<FunctionType>(QFT.getTypePtr()); |
338 | | // First, check whether we can build the full function type. If the |
339 | | // function type depends on an incomplete type (e.g. a struct or enum), we |
340 | | // cannot lower the function type. |
341 | 63.2k | if (!isFuncTypeConvertible(FT)) { |
342 | | // This function's type depends on an incomplete tag type. |
343 | | |
344 | | // Force conversion of all the relevant record types, to make sure |
345 | | // we re-convert the FunctionType when appropriate. |
346 | 31 | if (const RecordType *RT = FT->getReturnType()->getAs<RecordType>()) |
347 | 9 | ConvertRecordDeclType(RT->getDecl()); |
348 | 31 | if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) |
349 | 55 | for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
350 | 25 | if (const RecordType *RT = FPT->getParamType(i)->getAs<RecordType>()) |
351 | 19 | ConvertRecordDeclType(RT->getDecl()); |
352 | | |
353 | 31 | SkippedLayout = true; |
354 | | |
355 | | // Return a placeholder type. |
356 | 31 | return llvm::StructType::get(getLLVMContext()); |
357 | 31 | } |
358 | | |
359 | | // While we're converting the parameter types for a function, we don't want |
360 | | // to recursively convert any pointed-to structs. Converting directly-used |
361 | | // structs is ok though. |
362 | 63.2k | if (!RecordsBeingLaidOut.insert(Ty).second) { |
363 | 6 | SkippedLayout = true; |
364 | 6 | return llvm::StructType::get(getLLVMContext()); |
365 | 6 | } |
366 | | |
367 | | // The function type can be built; call the appropriate routines to |
368 | | // build it. |
369 | 63.2k | const CGFunctionInfo *FI; |
370 | 63.2k | if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) { |
371 | 62.8k | FI = &arrangeFreeFunctionType( |
372 | 62.8k | CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0))); |
373 | 62.8k | } else { |
374 | 394 | const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT); |
375 | 394 | FI = &arrangeFreeFunctionType( |
376 | 394 | CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0))); |
377 | 394 | } |
378 | | |
379 | 63.2k | llvm::Type *ResultType = nullptr; |
380 | | // If there is something higher level prodding our CGFunctionInfo, then |
381 | | // don't recurse into it again. |
382 | 63.2k | if (FunctionsBeingProcessed.count(FI)) { |
383 | | |
384 | 8 | ResultType = llvm::StructType::get(getLLVMContext()); |
385 | 8 | SkippedLayout = true; |
386 | 63.2k | } else { |
387 | | |
388 | | // Otherwise, we're good to go, go ahead and convert it. |
389 | 63.2k | ResultType = GetFunctionType(*FI); |
390 | 63.2k | } |
391 | | |
392 | 63.2k | RecordsBeingLaidOut.erase(Ty); |
393 | | |
394 | 63.2k | if (RecordsBeingLaidOut.empty()) |
395 | 56.2k | while (!DeferredRecords.empty())
396 | 140 | ConvertRecordDeclType(DeferredRecords.pop_back_val()); |
397 | 63.2k | return ResultType; |
398 | 63.2k | } |
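 | |
 | | // Illustrative sketch of the placeholder path above:
 | | //
 | | //   struct S { void (*fp)(struct S); };
 | | //
 | | // While S is mid-layout, void(struct S) is not convertible, so an empty
 | | // literal struct {} stands in for the function type, SkippedLayout is
 | | // set, and the real IR function type is produced once S is complete.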
399 | | |
400 | | /// ConvertType - Convert the specified type to its LLVM form. |
401 | 9.19M | llvm::Type *CodeGenTypes::ConvertType(QualType T) { |
402 | 9.19M | T = Context.getCanonicalType(T); |
403 | | |
404 | 9.19M | const Type *Ty = T.getTypePtr(); |
405 | | |
406 | | // For device-side compilation, CUDA device builtin surface/texture types
407 | | // may be represented by different types.
408 | 9.19M | if (Context.getLangOpts().CUDAIsDevice) { |
409 | 14.4k | if (T->isCUDADeviceBuiltinSurfaceType()) { |
410 | 0 | if (auto *Ty = CGM.getTargetCodeGenInfo() |
411 | 0 | .getCUDADeviceBuiltinSurfaceDeviceType()) |
412 | 0 | return Ty; |
413 | 14.4k | } else if (T->isCUDADeviceBuiltinTextureType()) { |
414 | 0 | if (auto *Ty = CGM.getTargetCodeGenInfo() |
415 | 0 | .getCUDADeviceBuiltinTextureDeviceType()) |
416 | 0 | return Ty; |
417 | 0 | } |
418 | 14.4k | } |
419 | | |
420 | | // RecordTypes are cached and processed specially. |
421 | 9.19M | if (const RecordType *RT = dyn_cast<RecordType>(Ty)) |
422 | 1.02M | return ConvertRecordDeclType(RT->getDecl()); |
423 | | |
424 | | // The LLVM type we return for a given Clang type may not always be the same, |
425 | | // most notably when dealing with recursive structs. We mark these potential |
426 | | // cases with ShouldUseCache below. Builtin types cannot be recursive. |
427 | | // TODO: when clang uses LLVM opaque pointers we won't be able to represent |
428 | | // recursive types with LLVM types, making this logic much simpler. |
429 | 8.16M | llvm::Type *CachedType = nullptr; |
430 | 8.16M | bool ShouldUseCache = |
431 | 8.16M | Ty->isBuiltinType() || |
432 | 8.16M | (noRecordsBeingLaidOut() && FunctionsBeingProcessed.empty());
433 | 8.16M | if (ShouldUseCache) { |
434 | 7.58M | llvm::DenseMap<const Type *, llvm::Type *>::iterator TCI = |
435 | 7.58M | TypeCache.find(Ty); |
436 | 7.58M | if (TCI != TypeCache.end()) |
437 | 6.56M | CachedType = TCI->second; |
438 | | // With expensive checks, check that the type we compute matches the |
439 | | // cached type. |
440 | 7.58M | #ifndef EXPENSIVE_CHECKS |
441 | 7.58M | if (CachedType) |
442 | 6.56M | return CachedType; |
443 | 7.58M | #endif |
444 | 7.58M | } |
445 | | |
446 | | // If we don't have it in the cache, convert it now. |
447 | 1.60M | llvm::Type *ResultType = nullptr; |
448 | 1.60M | switch (Ty->getTypeClass()) { |
449 | 0 | case Type::Record: // Handled above. |
450 | 0 | #define TYPE(Class, Base) |
451 | 0 | #define ABSTRACT_TYPE(Class, Base) |
452 | 0 | #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: |
453 | 0 | #define DEPENDENT_TYPE(Class, Base) case Type::Class: |
454 | 0 | #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: |
455 | 0 | #include "clang/AST/TypeNodes.inc" |
456 | 0 | llvm_unreachable("Non-canonical or dependent types aren't possible."); |
457 | |
458 | 657k | case Type::Builtin: { |
459 | 657k | switch (cast<BuiltinType>(Ty)->getKind()) { |
460 | 22.2k | case BuiltinType::Void: |
461 | 38.7k | case BuiltinType::ObjCId: |
462 | 55.1k | case BuiltinType::ObjCClass: |
463 | 71.6k | case BuiltinType::ObjCSel: |
464 | | // LLVM void type can only be used as the result of a function call. Just |
465 | | // map to the same as char. |
466 | 71.6k | ResultType = llvm::Type::getInt8Ty(getLLVMContext()); |
467 | 71.6k | break; |
468 | | |
469 | 3.29k | case BuiltinType::Bool: |
470 | | // Note that we always return bool as i1 for use as a scalar type. |
471 | 3.29k | ResultType = llvm::Type::getInt1Ty(getLLVMContext()); |
472 | 3.29k | break; |
473 | | |
474 | 9.60k | case BuiltinType::Char_S: |
475 | 9.68k | case BuiltinType::Char_U: |
476 | 11.7k | case BuiltinType::SChar: |
477 | 17.4k | case BuiltinType::UChar: |
478 | 36.8k | case BuiltinType::Short: |
479 | 38.9k | case BuiltinType::UShort: |
480 | 62.5k | case BuiltinType::Int: |
481 | 72.7k | case BuiltinType::UInt: |
482 | 92.9k | case BuiltinType::Long: |
483 | 103k | case BuiltinType::ULong: |
484 | 105k | case BuiltinType::LongLong: |
485 | 108k | case BuiltinType::ULongLong: |
486 | 108k | case BuiltinType::WChar_S: |
487 | 108k | case BuiltinType::WChar_U: |
488 | 108k | case BuiltinType::Char8: |
489 | 108k | case BuiltinType::Char16: |
490 | 108k | case BuiltinType::Char32: |
491 | 108k | case BuiltinType::ShortAccum: |
492 | 108k | case BuiltinType::Accum: |
493 | 108k | case BuiltinType::LongAccum: |
494 | 108k | case BuiltinType::UShortAccum: |
495 | 108k | case BuiltinType::UAccum: |
496 | 108k | case BuiltinType::ULongAccum: |
497 | 108k | case BuiltinType::ShortFract: |
498 | 108k | case BuiltinType::Fract: |
499 | 108k | case BuiltinType::LongFract: |
500 | 108k | case BuiltinType::UShortFract: |
501 | 108k | case BuiltinType::UFract: |
502 | 108k | case BuiltinType::ULongFract: |
503 | 108k | case BuiltinType::SatShortAccum: |
504 | 108k | case BuiltinType::SatAccum: |
505 | 108k | case BuiltinType::SatLongAccum: |
506 | 108k | case BuiltinType::SatUShortAccum: |
507 | 108k | case BuiltinType::SatUAccum: |
508 | 108k | case BuiltinType::SatULongAccum: |
509 | 108k | case BuiltinType::SatShortFract: |
510 | 108k | case BuiltinType::SatFract: |
511 | 108k | case BuiltinType::SatLongFract: |
512 | 108k | case BuiltinType::SatUShortFract: |
513 | 108k | case BuiltinType::SatUFract: |
514 | 108k | case BuiltinType::SatULongFract: |
515 | 108k | ResultType = llvm::IntegerType::get(getLLVMContext(), |
516 | 108k | static_cast<unsigned>(Context.getTypeSize(T))); |
517 | 108k | break; |
518 | | |
519 | 31 | case BuiltinType::Float16: |
520 | 31 | ResultType = |
521 | 31 | getTypeForFormat(getLLVMContext(), Context.getFloatTypeSemantics(T), |
522 | 31 | /* UseNativeHalf = */ true); |
523 | 31 | break; |
524 | | |
525 | 861 | case BuiltinType::Half: |
526 | | // Half FP can either be storage-only (lowered to i16) or native. |
527 | 861 | ResultType = getTypeForFormat( |
528 | 861 | getLLVMContext(), Context.getFloatTypeSemantics(T), |
529 | 861 | Context.getLangOpts().NativeHalfType || |
530 | 861 | !Context.getTargetInfo().useFP16ConversionIntrinsics());
531 | 861 | break; |
532 | 311 | case BuiltinType::BFloat16: |
533 | 5.04k | case BuiltinType::Float: |
534 | 10.2k | case BuiltinType::Double: |
535 | 10.6k | case BuiltinType::LongDouble: |
536 | 10.7k | case BuiltinType::Float128: |
537 | 10.7k | case BuiltinType::Ibm128: |
538 | 10.7k | ResultType = getTypeForFormat(getLLVMContext(), |
539 | 10.7k | Context.getFloatTypeSemantics(T), |
540 | 10.7k | /* UseNativeHalf = */ false); |
541 | 10.7k | break; |
542 | | |
543 | 791 | case BuiltinType::NullPtr: |
544 | | // Model std::nullptr_t as i8* |
545 | 791 | ResultType = llvm::Type::getInt8PtrTy(getLLVMContext()); |
546 | 791 | break; |
547 | | |
548 | 75 | case BuiltinType::UInt128: |
549 | 175 | case BuiltinType::Int128: |
550 | 175 | ResultType = llvm::IntegerType::get(getLLVMContext(), 128); |
551 | 175 | break; |
552 | | |
553 | 0 | #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ |
554 | 1.06k | case BuiltinType::Id: |
555 | 1.06k | #include "clang/Basic/OpenCLImageTypes.def"
556 | 1.06k | #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
557 | 1.06k | case BuiltinType::Id:
558 | 1.06k | #include "clang/Basic/OpenCLExtensionTypes.def"
559 | 546 | case BuiltinType::OCLSampler: |
560 | 59 | case BuiltinType::OCLEvent: |
561 | 67 | case BuiltinType::OCLClkEvent: |
562 | 82 | case BuiltinType::OCLQueue: |
563 | 90 | case BuiltinType::OCLReserveID: |
564 | 90 | ResultType = CGM.getOpenCLRuntime().convertOpenCLSpecificType(Ty); |
565 | 90 | break; |
566 | 25.8k | case BuiltinType::SveInt8: |
567 | 49.9k | case BuiltinType::SveUint8: |
568 | 50.1k | case BuiltinType::SveInt8x2: |
569 | 50.3k | case BuiltinType::SveUint8x2: |
570 | 50.5k | case BuiltinType::SveInt8x3: |
571 | 50.7k | case BuiltinType::SveUint8x3: |
572 | 50.9k | case BuiltinType::SveInt8x4: |
573 | 51.1k | case BuiltinType::SveUint8x4: |
574 | 83.1k | case BuiltinType::SveInt16: |
575 | 111k | case BuiltinType::SveUint16: |
576 | 111k | case BuiltinType::SveInt16x2: |
577 | 111k | case BuiltinType::SveUint16x2: |
578 | 111k | case BuiltinType::SveInt16x3: |
579 | 111k | case BuiltinType::SveUint16x3: |
580 | 112k | case BuiltinType::SveInt16x4: |
581 | 112k | case BuiltinType::SveUint16x4: |
582 | 151k | case BuiltinType::SveInt32: |
583 | 190k | case BuiltinType::SveUint32: |
584 | 190k | case BuiltinType::SveInt32x2: |
585 | 190k | case BuiltinType::SveUint32x2: |
586 | 190k | case BuiltinType::SveInt32x3: |
587 | 191k | case BuiltinType::SveUint32x3: |
588 | 191k | case BuiltinType::SveInt32x4: |
589 | 191k | case BuiltinType::SveUint32x4: |
590 | 228k | case BuiltinType::SveInt64: |
591 | 268k | case BuiltinType::SveUint64: |
592 | 269k | case BuiltinType::SveInt64x2: |
593 | 269k | case BuiltinType::SveUint64x2: |
594 | 269k | case BuiltinType::SveInt64x3: |
595 | 269k | case BuiltinType::SveUint64x3: |
596 | 269k | case BuiltinType::SveInt64x4: |
597 | 270k | case BuiltinType::SveUint64x4: |
598 | 391k | case BuiltinType::SveBool: |
599 | 411k | case BuiltinType::SveFloat16: |
600 | 412k | case BuiltinType::SveFloat16x2: |
601 | 412k | case BuiltinType::SveFloat16x3: |
602 | 412k | case BuiltinType::SveFloat16x4: |
603 | 434k | case BuiltinType::SveFloat32: |
604 | 434k | case BuiltinType::SveFloat32x2: |
605 | 434k | case BuiltinType::SveFloat32x3: |
606 | 434k | case BuiltinType::SveFloat32x4: |
607 | 454k | case BuiltinType::SveFloat64: |
608 | 455k | case BuiltinType::SveFloat64x2: |
609 | 455k | case BuiltinType::SveFloat64x3: |
610 | 455k | case BuiltinType::SveFloat64x4: |
611 | 460k | case BuiltinType::SveBFloat16: |
612 | 460k | case BuiltinType::SveBFloat16x2: |
613 | 460k | case BuiltinType::SveBFloat16x3: |
614 | 460k | case BuiltinType::SveBFloat16x4: { |
615 | 460k | ASTContext::BuiltinVectorTypeInfo Info = |
616 | 460k | Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty)); |
617 | 460k | return llvm::ScalableVectorType::get(ConvertType(Info.ElementType), |
618 | 460k | Info.EC.getKnownMinValue() * |
619 | 460k | Info.NumVectors); |
620 | 460k | } |
621 | 0 | #define PPC_VECTOR_TYPE(Name, Id, Size) \ |
622 | 24 | case BuiltinType::Id: \ |
623 | 24 | ResultType = \ |
624 | 24 | llvm::FixedVectorType::get(ConvertType(Context.BoolTy), Size); \ |
625 | 24 | break; |
626 | 460k | #include "clang/Basic/PPCTypes.def" |
627 | 2.24k | #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: |
628 | 48 | #include "clang/Basic/RISCVVTypes.def"
629 | 48 | { |
630 | 48 | ASTContext::BuiltinVectorTypeInfo Info = |
631 | 48 | Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty)); |
632 | 48 | return llvm::ScalableVectorType::get(ConvertType(Info.ElementType), |
633 | 48 | Info.EC.getKnownMinValue() * |
634 | 48 | Info.NumVectors); |
635 | 2.19k | } |
636 | 0 | case BuiltinType::Dependent: |
637 | 0 | #define BUILTIN_TYPE(Id, SingletonId) |
638 | 0 | #define PLACEHOLDER_TYPE(Id, SingletonId) \ |
639 | 0 | case BuiltinType::Id: |
640 | 0 | #include "clang/AST/BuiltinTypes.def" |
641 | 0 | llvm_unreachable("Unexpected placeholder builtin type!"); |
642 | 657k | } |
643 | 196k | break; |
644 | 657k | } |
645 | 196k | case Type::Auto: |
646 | 0 | case Type::DeducedTemplateSpecialization: |
647 | 0 | llvm_unreachable("Unexpected undeduced type!"); |
648 | 2.27k | case Type::Complex: { |
649 | 2.27k | llvm::Type *EltTy = ConvertType(cast<ComplexType>(Ty)->getElementType()); |
650 | 2.27k | ResultType = llvm::StructType::get(EltTy, EltTy); |
651 | 2.27k | break; |
652 | 0 | } |
653 | 87.0k | case Type::LValueReference: |
654 | 99.2k | case Type::RValueReference: { |
655 | 99.2k | const ReferenceType *RTy = cast<ReferenceType>(Ty); |
656 | 99.2k | QualType ETy = RTy->getPointeeType(); |
657 | 99.2k | llvm::Type *PointeeType = ConvertTypeForMem(ETy); |
658 | 99.2k | unsigned AS = Context.getTargetAddressSpace(ETy); |
659 | 99.2k | ResultType = llvm::PointerType::get(PointeeType, AS); |
660 | 99.2k | break; |
661 | 87.0k | } |
662 | 491k | case Type::Pointer: { |
663 | 491k | const PointerType *PTy = cast<PointerType>(Ty); |
664 | 491k | QualType ETy = PTy->getPointeeType(); |
665 | 491k | llvm::Type *PointeeType = ConvertTypeForMem(ETy); |
666 | 491k | if (PointeeType->isVoidTy()) |
667 | 0 | PointeeType = llvm::Type::getInt8Ty(getLLVMContext()); |
668 | 491k | unsigned AS = Context.getTargetAddressSpace(ETy); |
669 | 491k | ResultType = llvm::PointerType::get(PointeeType, AS); |
670 | 491k | break; |
671 | 87.0k | } |
672 | | |
673 | 2.94k | case Type::VariableArray: { |
674 | 2.94k | const VariableArrayType *A = cast<VariableArrayType>(Ty); |
675 | 2.94k | assert(A->getIndexTypeCVRQualifiers() == 0 && |
676 | 2.94k | "FIXME: We only handle trivial array types so far!"); |
677 | | // VLAs resolve to the innermost element type; this matches |
678 | | // the return of alloca, and there isn't any obviously better choice. |
679 | 0 | ResultType = ConvertTypeForMem(A->getElementType()); |
680 | 2.94k | break; |
681 | 87.0k | } |
682 | 211 | case Type::IncompleteArray: { |
683 | 211 | const IncompleteArrayType *A = cast<IncompleteArrayType>(Ty); |
684 | 211 | assert(A->getIndexTypeCVRQualifiers() == 0 && |
685 | 211 | "FIXME: We only handle trivial array types so far!"); |
686 | | // int X[] -> [0 x int], unless the element type is not sized. If it is |
687 | | // unsized (e.g. an incomplete struct) just use [0 x i8]. |
688 | 0 | ResultType = ConvertTypeForMem(A->getElementType()); |
689 | 211 | if (!ResultType->isSized()) { |
690 | 1 | SkippedLayout = true; |
691 | 1 | ResultType = llvm::Type::getInt8Ty(getLLVMContext()); |
692 | 1 | } |
693 | 211 | ResultType = llvm::ArrayType::get(ResultType, 0); |
694 | 211 | break; |
695 | 87.0k | } |
696 | 72.6k | case Type::ConstantArray: { |
697 | 72.6k | const ConstantArrayType *A = cast<ConstantArrayType>(Ty); |
698 | 72.6k | llvm::Type *EltTy = ConvertTypeForMem(A->getElementType()); |
699 | | |
700 | | // Lower arrays of undefined struct type to arrays of i8 just to have a |
701 | | // concrete type. |
702 | 72.6k | if (!EltTy->isSized()) { |
703 | 3 | SkippedLayout = true; |
704 | 3 | EltTy = llvm::Type::getInt8Ty(getLLVMContext()); |
705 | 3 | } |
706 | | |
707 | 72.6k | ResultType = llvm::ArrayType::get(EltTy, A->getSize().getZExtValue()); |
708 | 72.6k | break; |
709 | 87.0k | } |
710 | 3.65k | case Type::ExtVector: |
711 | 41.3k | case Type::Vector: { |
712 | 41.3k | const auto *VT = cast<VectorType>(Ty); |
713 | | // An ext_vector_type of Bool is really a vector of bits. |
714 | 41.3k | llvm::Type *IRElemTy = VT->isExtVectorBoolType() |
715 | 41.3k | ? llvm::Type::getInt1Ty(getLLVMContext())
716 | 41.3k | : ConvertType(VT->getElementType());
717 | 41.3k | ResultType = llvm::FixedVectorType::get(IRElemTy, VT->getNumElements()); |
718 | 41.3k | break; |
719 | 3.65k | } |
720 | 300 | case Type::ConstantMatrix: { |
721 | 300 | const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty); |
722 | 300 | ResultType = |
723 | 300 | llvm::FixedVectorType::get(ConvertType(MT->getElementType()), |
724 | 300 | MT->getNumRows() * MT->getNumColumns()); |
725 | 300 | break; |
726 | 3.65k | } |
727 | 395 | case Type::FunctionNoProto: |
728 | 63.2k | case Type::FunctionProto: |
729 | 63.2k | ResultType = ConvertFunctionTypeInternal(T); |
730 | 63.2k | break; |
731 | 70.0k | case Type::ObjCObject: |
732 | 70.0k | ResultType = ConvertType(cast<ObjCObjectType>(Ty)->getBaseType()); |
733 | 70.0k | break; |
734 | | |
735 | 9.65k | case Type::ObjCInterface: { |
736 | | // Objective-C interfaces are always opaque (outside of the |
737 | | // runtime, which can do whatever it likes); we never refine |
738 | | // these. |
739 | 9.65k | llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(Ty)]; |
740 | 9.65k | if (!T) |
741 | 3.80k | T = llvm::StructType::create(getLLVMContext()); |
742 | 9.65k | ResultType = T; |
743 | 9.65k | break; |
744 | 395 | } |
745 | | |
746 | 79.2k | case Type::ObjCObjectPointer: { |
747 | | // Protocol qualifications do not influence the LLVM type, we just return a |
748 | | // pointer to the underlying interface type. We don't need to worry about |
749 | | // recursive conversion. |
750 | 79.2k | llvm::Type *T = |
751 | 79.2k | ConvertTypeForMem(cast<ObjCObjectPointerType>(Ty)->getPointeeType()); |
752 | 79.2k | ResultType = T->getPointerTo(); |
753 | 79.2k | break; |
754 | 395 | } |
755 | | |
756 | 9.99k | case Type::Enum: { |
757 | 9.99k | const EnumDecl *ED = cast<EnumType>(Ty)->getDecl(); |
758 | 9.99k | if (ED->isCompleteDefinition() || ED->isFixed())
759 | 9.97k | return ConvertType(ED->getIntegerType()); |
760 | | // Return a placeholder 'i32' type. This can be changed later when the |
761 | | // type is defined (see UpdateCompletedType), but is likely to be the |
762 | | // "right" answer. |
763 | 19 | ResultType = llvm::Type::getInt32Ty(getLLVMContext()); |
764 | 19 | break; |
765 | 9.99k | } |
766 | | |
767 | 867 | case Type::BlockPointer: { |
768 | 867 | const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType(); |
769 | 867 | llvm::Type *PointeeType = CGM.getLangOpts().OpenCL |
770 | 867 | ? CGM.getGenericBlockLiteralType()
771 | 867 | : ConvertTypeForMem(FTy);
772 | | // Block pointers lower to function type. For function type, |
773 | | // getTargetAddressSpace() returns default address space for |
774 | | // function pointer i.e. program address space. Therefore, for block |
775 | | // pointers, it is important to pass qualifiers when calling |
776 | | // getTargetAddressSpace(), to ensure that we get the address space |
777 | | // for data pointers and not function pointers. |
778 | 867 | unsigned AS = Context.getTargetAddressSpace(FTy.getQualifiers()); |
779 | 867 | ResultType = llvm::PointerType::get(PointeeType, AS); |
780 | 867 | break; |
781 | 9.99k | } |
782 | | |
783 | 1.71k | case Type::MemberPointer: { |
784 | 1.71k | auto *MPTy = cast<MemberPointerType>(Ty); |
785 | 1.71k | if (!getCXXABI().isMemberPointerConvertible(MPTy)) { |
786 | 23 | auto *C = MPTy->getClass(); |
787 | 23 | auto Insertion = RecordsWithOpaqueMemberPointers.insert({C, nullptr}); |
788 | 23 | if (Insertion.second) |
789 | 20 | Insertion.first->second = llvm::StructType::create(getLLVMContext()); |
790 | 23 | ResultType = Insertion.first->second; |
791 | 1.69k | } else { |
792 | 1.69k | ResultType = getCXXABI().ConvertMemberPointerType(MPTy); |
793 | 1.69k | } |
794 | 1.71k | break; |
795 | 9.99k | } |
796 | | |
797 | 747 | case Type::Atomic: { |
798 | 747 | QualType valueType = cast<AtomicType>(Ty)->getValueType(); |
799 | 747 | ResultType = ConvertTypeForMem(valueType); |
800 | | |
801 | | // Pad out to the inflated size if necessary. |
802 | 747 | uint64_t valueSize = Context.getTypeSize(valueType); |
803 | 747 | uint64_t atomicSize = Context.getTypeSize(Ty); |
804 | 747 | if (valueSize != atomicSize) { |
805 | 24 | assert(valueSize < atomicSize); |
806 | 0 | llvm::Type *elts[] = { |
807 | 24 | ResultType, |
808 | 24 | llvm::ArrayType::get(CGM.Int8Ty, (atomicSize - valueSize) / 8) |
809 | 24 | }; |
810 | 24 | ResultType = llvm::StructType::get(getLLVMContext(), |
811 | 24 | llvm::makeArrayRef(elts)); |
812 | 24 | } |
813 | 0 | break; |
814 | 9.99k | } |
815 | 239 | case Type::Pipe: { |
816 | 239 | ResultType = CGM.getOpenCLRuntime().getPipeType(cast<PipeType>(Ty)); |
817 | 239 | break; |
818 | 9.99k | } |
819 | 1.02k | case Type::BitInt: { |
820 | 1.02k | const auto &EIT = cast<BitIntType>(Ty); |
821 | 1.02k | ResultType = llvm::Type::getIntNTy(getLLVMContext(), EIT->getNumBits()); |
822 | 1.02k | break; |
823 | 9.99k | } |
824 | 1.60M | } |
825 | | |
826 | 1.13M | assert(ResultType && "Didn't convert a type?"); |
827 | 0 | assert((!CachedType || CachedType == ResultType) && |
828 | 1.13M | "Cached type doesn't match computed type"); |
829 | | |
830 | 1.13M | if (ShouldUseCache) |
831 | 555k | TypeCache[Ty] = ResultType; |
832 | 1.13M | return ResultType; |
833 | 1.60M | } |
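 | |
 | | // Illustrative sketch (assuming a typical x86-64 target) of the mappings
 | | // implemented by the switch above:
 | |
 | | //   int                                   -> i32
 | | //   _Complex float                        -> { float, float }
 | | //   int &                                 -> i32*
 | | //   int [8]                               -> [8 x i32]
 | | //   int __attribute__((vector_size(16))) -> <4 x i32>
 | | //   std::nullptr_t                        -> i8*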
834 | | |
835 | 36 | bool CodeGenModule::isPaddedAtomicType(QualType type) { |
836 | 36 | return isPaddedAtomicType(type->castAs<AtomicType>()); |
837 | 36 | } |
838 | | |
839 | 36 | bool CodeGenModule::isPaddedAtomicType(const AtomicType *type) { |
840 | 36 | return Context.getTypeSize(type) != Context.getTypeSize(type->getValueType()); |
841 | 36 | } |
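 | |
 | | // Illustrative sketch: atomics may be inflated to a lock-free-friendly
 | | // size, e.g. on a typical target
 | |
 | | //   struct S3 { char c[3]; };  // sizeof == 3
 | | //   _Atomic(struct S3) a;      // sizeof == 4; IR: { %struct.S3, [1 x i8] }
 | |
 | | // so isPaddedAtomicType returns true for _Atomic(struct S3).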
842 | | |
843 | | /// ConvertRecordDeclType - Lay out a tagged decl type like struct or union. |
844 | 1.06M | llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) { |
845 | | // TagDecls are not necessarily unique; instead use the (clang)
846 | | // type connected to the decl. |
847 | 1.06M | const Type *Key = Context.getTagDeclType(RD).getTypePtr(); |
848 | | |
849 | 1.06M | llvm::StructType *&Entry = RecordDeclTypes[Key]; |
850 | | |
851 | | // If we don't have a StructType at all yet, create the forward declaration. |
852 | 1.06M | if (!Entry) { |
853 | 136k | Entry = llvm::StructType::create(getLLVMContext()); |
854 | 136k | addRecordTypeName(RD, Entry, ""); |
855 | 136k | } |
856 | 1.06M | llvm::StructType *Ty = Entry; |
857 | | |
858 | | // If this is still a forward declaration, or the LLVM type is already |
859 | | // complete, there's nothing more to do. |
860 | 1.06M | RD = RD->getDefinition(); |
861 | 1.06M | if (!RD || !RD->isCompleteDefinition() || !Ty->isOpaque())
862 | 925k | return Ty; |
863 | | |
864 | | // If converting this type would cause us to infinitely loop, don't do it! |
865 | 136k | if (!isSafeToConvert(RD, *this)) { |
866 | 1.04k | DeferredRecords.push_back(RD); |
867 | 1.04k | return Ty; |
868 | 1.04k | } |
869 | | |
870 | | // Okay, this is a definition of a type. Compile the implementation now. |
871 | 135k | bool InsertResult = RecordsBeingLaidOut.insert(Key).second; |
872 | 135k | (void)InsertResult; |
873 | 135k | assert(InsertResult && "Recursively compiling a struct?"); |
874 | | |
875 | | // Force conversion of non-virtual base classes recursively. |
876 | 135k | if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) { |
877 | 95.3k | for (const auto &I : CRD->bases()) { |
878 | 29.2k | if (I.isVirtual()) continue;
879 | 28.3k | ConvertRecordDeclType(I.getType()->castAs<RecordType>()->getDecl()); |
880 | 28.3k | } |
881 | 95.3k | } |
882 | | |
883 | | // Layout fields. |
884 | 135k | std::unique_ptr<CGRecordLayout> Layout = ComputeRecordLayout(RD, Ty); |
885 | 135k | CGRecordLayouts[Key] = std::move(Layout); |
886 | | |
887 | | // We're done laying out this struct. |
888 | 135k | bool EraseResult = RecordsBeingLaidOut.erase(Key); (void)EraseResult; |
889 | 135k | assert(EraseResult && "struct not in RecordsBeingLaidOut set?"); |
890 | | |
891 | | // If this struct blocked a FunctionType conversion, then recompute whatever |
892 | | // was derived from that. |
893 | | // FIXME: This is hugely overconservative. |
894 | 135k | if (SkippedLayout) |
895 | 66 | TypeCache.clear(); |
896 | | |
897 | | // If we're done converting the outer-most record, then convert any deferred |
898 | | // structs as well. |
899 | 135k | if (RecordsBeingLaidOut.empty()) |
900 | 91.8k | while (!DeferredRecords.empty())
901 | 903 | ConvertRecordDeclType(DeferredRecords.pop_back_val()); |
902 | | |
903 | 135k | return Ty; |
904 | 136k | } |
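 | |
 | | // Illustrative sketch of the lifecycle above: "struct S;" first maps to
 | | // an opaque named type %struct.S; when the definition is seen and it is
 | | // safe to convert, the body is filled in, e.g.
 | |
 | | //   struct S { int a; char b; };   ->   %struct.S = type { i32, i8 }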
905 | | |
906 | | /// getCGRecordLayout - Return record layout info for the given record decl. |
907 | | const CGRecordLayout & |
908 | 298k | CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) { |
909 | 298k | const Type *Key = Context.getTagDeclType(RD).getTypePtr(); |
910 | | |
911 | 298k | auto I = CGRecordLayouts.find(Key); |
912 | 298k | if (I != CGRecordLayouts.end()) |
913 | 293k | return *I->second; |
914 | | // Compute the type information. |
915 | 5.06k | ConvertRecordDeclType(RD); |
916 | | |
917 | | // Now try again. |
918 | 5.06k | I = CGRecordLayouts.find(Key); |
919 | | |
920 | 5.06k | assert(I != CGRecordLayouts.end() && |
921 | 5.06k | "Unable to find record layout information for type"); |
922 | 0 | return *I->second; |
923 | 298k | } |
924 | | |
925 | 18 | bool CodeGenTypes::isPointerZeroInitializable(QualType T) { |
926 | 18 | assert((T->isAnyPointerType() || T->isBlockPointerType()) && "Invalid type"); |
927 | 0 | return isZeroInitializable(T); |
928 | 18 | } |
929 | | |
930 | 282k | bool CodeGenTypes::isZeroInitializable(QualType T) { |
931 | 282k | if (T->getAs<PointerType>()) |
932 | 66.1k | return Context.getTargetNullPointerValue(T) == 0; |
933 | | |
934 | 216k | if (const auto *AT = Context.getAsArrayType(T)) { |
935 | 15.2k | if (isa<IncompleteArrayType>(AT)) |
936 | 102 | return true; |
937 | 15.1k | if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) |
938 | 15.1k | if (Context.getConstantArrayElementCount(CAT) == 0) |
939 | 2.71k | return true; |
940 | 12.4k | T = Context.getBaseElementType(T); |
941 | 12.4k | } |
942 | | |
943 | | // Records are non-zero-initializable if they contain any |
944 | | // non-zero-initializable subobjects. |
945 | 213k | if (const RecordType *RT = T->getAs<RecordType>()) { |
946 | 27.6k | const RecordDecl *RD = RT->getDecl(); |
947 | 27.6k | return isZeroInitializable(RD); |
948 | 27.6k | } |
949 | | |
950 | | // We have to ask the ABI about member pointers. |
951 | 186k | if (const MemberPointerType *MPT = T->getAs<MemberPointerType>()) |
952 | 287 | return getCXXABI().isZeroInitializable(MPT); |
953 | | |
954 | | // Everything else is okay. |
955 | 186k | return true; |
956 | 186k | } |
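 | |
 | | // Illustrative sketch: a type fails this test when its null/zero value
 | | // is not all-zero bits. For example, under the Itanium C++ ABI a null
 | | // pointer to data member is represented as -1, so
 | |
 | | //   struct S { int x; };
 | | //   int S::*mp;   // not zero-initializable via memset(0)
 | |
 | | // whereas ordinary pointers pass on targets whose null pointer value is 0
 | | // (checked via getTargetNullPointerValue above).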
957 | | |
958 | 42.9k | bool CodeGenTypes::isZeroInitializable(const RecordDecl *RD) { |
959 | 42.9k | return getCGRecordLayout(RD).isZeroInitializable(); |
960 | 42.9k | } |