/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/CodeGen/CGExpr.cpp
Line | Count | Source |
1 | | //===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | // |
9 | | // This contains code to emit Expr nodes as LLVM code. |
10 | | // |
11 | | //===----------------------------------------------------------------------===// |
12 | | |
13 | | #include "CGCXXABI.h" |
14 | | #include "CGCall.h" |
15 | | #include "CGCleanup.h" |
16 | | #include "CGDebugInfo.h" |
17 | | #include "CGObjCRuntime.h" |
18 | | #include "CGOpenMPRuntime.h" |
19 | | #include "CGRecordLayout.h" |
20 | | #include "CodeGenFunction.h" |
21 | | #include "CodeGenModule.h" |
22 | | #include "ConstantEmitter.h" |
23 | | #include "TargetInfo.h" |
24 | | #include "clang/AST/ASTContext.h" |
25 | | #include "clang/AST/Attr.h" |
26 | | #include "clang/AST/DeclObjC.h" |
27 | | #include "clang/AST/NSAPI.h" |
28 | | #include "clang/Basic/Builtins.h" |
29 | | #include "clang/Basic/CodeGenOptions.h" |
30 | | #include "clang/Basic/SourceManager.h" |
31 | | #include "llvm/ADT/Hashing.h" |
32 | | #include "llvm/ADT/StringExtras.h" |
33 | | #include "llvm/IR/DataLayout.h" |
34 | | #include "llvm/IR/Intrinsics.h" |
35 | | #include "llvm/IR/LLVMContext.h" |
36 | | #include "llvm/IR/MDBuilder.h" |
37 | | #include "llvm/Support/ConvertUTF.h" |
38 | | #include "llvm/Support/MathExtras.h" |
39 | | #include "llvm/Support/Path.h" |
40 | | #include "llvm/Transforms/Utils/SanitizerStats.h" |
41 | | |
42 | | #include <string> |
43 | | |
44 | | using namespace clang; |
45 | | using namespace CodeGen; |
46 | | |
47 | | //===--------------------------------------------------------------------===// |
48 | | // Miscellaneous Helper Methods |
49 | | //===--------------------------------------------------------------------===// |
50 | | |
51 | 1.94k | llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) { |
52 | 1.94k | unsigned addressSpace = |
53 | 1.94k | cast<llvm::PointerType>(value->getType())->getAddressSpace(); |
54 | | |
55 | 1.94k | llvm::PointerType *destType = Int8PtrTy; |
56 | 1.94k | if (addressSpace) |
57 | 15 | destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace); |
58 | | |
59 | 1.94k | if (value->getType() == destType) return value; |
60 | 1.79k | return Builder.CreateBitCast(value, destType); |
61 | 1.79k | } |
62 | | |
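// Illustration (a hedged editorial sketch, not part of the covered source):
// for a value of type i32 addrspace(3)*, the function above yields
// i8 addrspace(3)* -- the source address space is preserved rather than
// forcing the default-address-space i8*:
//   %p = ...                                            ; i32 addrspace(3)*
//   %v = bitcast i32 addrspace(3)* %p to i8 addrspace(3)*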
63 | | /// CreateTempAlloca - This creates an alloca and inserts it into the entry |
64 | | /// block. |
65 | | Address CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty, |
66 | | CharUnits Align, |
67 | | const Twine &Name, |
68 | 958k | llvm::Value *ArraySize) { |
69 | 958k | auto Alloca = CreateTempAlloca(Ty, Name, ArraySize); |
70 | 958k | Alloca->setAlignment(Align.getAsAlign()); |
71 | 958k | return Address(Alloca, Align); |
72 | 958k | } |
73 | | |
74 | | /// CreateTempAlloca - This creates an alloca and inserts it into the entry |
75 | | /// block. The alloca is cast to the default address space if necessary. |
76 | | Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align, |
77 | | const Twine &Name, |
78 | | llvm::Value *ArraySize, |
79 | 958k | Address *AllocaAddr) { |
80 | 958k | auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize); |
81 | 958k | if (AllocaAddr) |
82 | 228k | *AllocaAddr = Alloca; |
83 | 958k | llvm::Value *V = Alloca.getPointer(); |
84 | | // Alloca always returns a pointer in alloca address space, which may |
85 | | // be different from the type defined by the language. For example, |
86 | | // in C++ the auto variables are in the default address space. Therefore |
87 | | // cast alloca to the default address space when necessary. |
88 | 958k | if (getASTAllocaAddressSpace() != LangAS::Default) { |
89 | 2.85k | auto DestAddrSpace = getContext().getTargetAddressSpace(LangAS::Default); |
90 | 2.85k | llvm::IRBuilderBase::InsertPointGuard IPG(Builder); |
91 | | // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt, |
92 | | // otherwise alloca is inserted at the current insertion point of the |
93 | | // builder. |
94 | 2.85k | if (!ArraySize) |
95 | 2.85k | Builder.SetInsertPoint(AllocaInsertPt); |
96 | 2.85k | V = getTargetHooks().performAddrSpaceCast( |
97 | 2.85k | *this, V, getASTAllocaAddressSpace(), LangAS::Default, |
98 | 2.85k | Ty->getPointerTo(DestAddrSpace), /*non-null*/ true); |
99 | 2.85k | } |
100 | | |
101 | 958k | return Address(V, Align); |
102 | 958k | } |
103 | | |
104 | | /// CreateTempAlloca - This creates an alloca and inserts it into the entry |
105 | | /// block if \p ArraySize is nullptr, otherwise inserts it at the current |
106 | | /// insertion point of the builder. |
107 | | llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, |
108 | | const Twine &Name, |
109 | 966k | llvm::Value *ArraySize) { |
110 | 966k | if (ArraySize) |
111 | 2.15k | return Builder.CreateAlloca(Ty, ArraySize, Name); |
112 | 964k | return new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(), |
113 | 964k | ArraySize, Name, AllocaInsertPt); |
114 | 964k | } |
115 | | |
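// A minimal usage sketch (hypothetical caller; assumes a CodeGenFunction
// &CGF is in scope):
//   llvm::AllocaInst *A = CGF.CreateTempAlloca(CGF.Int32Ty, "tmp");
// With a null ArraySize the alloca is placed at AllocaInsertPt in the entry
// block, which keeps it eligible for mem2reg promotion; with a dynamic
// ArraySize it is emitted at the builder's current insertion point instead.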
116 | | /// CreateDefaultAlignTempAlloca - This creates an alloca with the |
117 | | /// default alignment of the corresponding LLVM type, which is *not* |
118 | | /// guaranteed to be related in any way to the expected alignment of |
119 | | /// an AST type that might have been lowered to Ty. |
120 | | Address CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty, |
121 | 11.4k | const Twine &Name) { |
122 | 11.4k | CharUnits Align = |
123 | 11.4k | CharUnits::fromQuantity(CGM.getDataLayout().getABITypeAlignment(Ty)); |
124 | 11.4k | return CreateTempAlloca(Ty, Align, Name); |
125 | 11.4k | } |
126 | | |
127 | 2.75k | void CodeGenFunction::InitTempAlloca(Address Var, llvm::Value *Init) { |
128 | 2.75k | auto *Alloca = Var.getPointer(); |
129 | 2.75k | assert(isa<llvm::AllocaInst>(Alloca) || |
130 | 2.75k | (isa<llvm::AddrSpaceCastInst>(Alloca) && |
131 | 2.75k | isa<llvm::AllocaInst>( |
132 | 2.75k | cast<llvm::AddrSpaceCastInst>(Alloca)->getPointerOperand()))); |
133 | | |
134 | 2.75k | auto *Store = new llvm::StoreInst(Init, Alloca, /*volatile*/ false, |
135 | 2.75k | Var.getAlignment().getAsAlign()); |
136 | 2.75k | llvm::BasicBlock *Block = AllocaInsertPt->getParent(); |
137 | 2.75k | Block->getInstList().insertAfter(AllocaInsertPt->getIterator(), Store); |
138 | 2.75k | } |
139 | | |
140 | 141k | Address CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) { |
141 | 141k | CharUnits Align = getContext().getTypeAlignInChars(Ty); |
142 | 141k | return CreateTempAlloca(ConvertType(Ty), Align, Name); |
143 | 141k | } |
144 | | |
145 | | Address CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name, |
146 | 105k | Address *Alloca) { |
147 | | // FIXME: Should we prefer the preferred type alignment here? |
148 | 105k | return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca); |
149 | 105k | } |
150 | | |
151 | | Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align, |
152 | 584k | const Twine &Name, Address *Alloca) { |
153 | 584k | Address Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name, |
154 | 584k | /*ArraySize=*/nullptr, Alloca); |
155 | | |
156 | 584k | if (Ty->isConstantMatrixType()) { |
157 | 98 | auto *ArrayTy = cast<llvm::ArrayType>(Result.getType()->getElementType()); |
158 | 98 | auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(), |
159 | 98 | ArrayTy->getNumElements()); |
160 | | |
161 | 98 | Result = Address( |
162 | 98 | Builder.CreateBitCast(Result.getPointer(), VectorTy->getPointerTo()), |
163 | 98 | Result.getAlignment()); |
164 | 98 | } |
165 | 584k | return Result; |
166 | 584k | } |
167 | | |
168 | | Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty, CharUnits Align, |
169 | 268 | const Twine &Name) { |
170 | 268 | return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name); |
171 | 268 | } |
172 | | |
173 | | Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty, |
174 | 0 | const Twine &Name) { |
175 | 0 | return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty), |
176 | 0 | Name); |
177 | 0 | } |
178 | | |
179 | | /// EvaluateExprAsBool - Perform the usual unary conversions on the specified |
180 | | /// expression and compare the result against zero, returning an Int1Ty value. |
181 | 115k | llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) { |
182 | 115k | PGO.setCurrentStmt(E); |
183 | 115k | if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) { |
184 | 0 | llvm::Value *MemPtr = EmitScalarExpr(E); |
185 | 0 | return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT); |
186 | 0 | } |
187 | | |
188 | 115k | QualType BoolTy = getContext().BoolTy; |
189 | 115k | SourceLocation Loc = E->getExprLoc(); |
190 | 115k | CGFPOptionsRAII FPOptsRAII(*this, E); |
191 | 115k | if (!E->getType()->isAnyComplexType()) |
192 | 115k | return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc); |
193 | | |
194 | 1 | return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy, |
195 | 1 | Loc); |
196 | 1 | } |
197 | | |
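// Illustration (hedged): for `if (p)` with `int *p`, EmitScalarConversion
// lowers the pointer-to-bool conversion to a null comparison yielding i1:
//   %tobool = icmp ne i32* %p, null
// A _Complex operand instead goes through EmitComplexToScalarConversion,
// which tests both components against zero.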
198 | | /// EmitIgnoredExpr - Emit code to compute the specified expression, |
199 | | /// ignoring the result. |
200 | 316k | void CodeGenFunction::EmitIgnoredExpr(const Expr *E) { |
201 | 316k | if (E->isRValue()) |
202 | 162k | return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true); |
203 | | |
204 | | // Just emit it as an l-value and drop the result. |
205 | 153k | EmitLValue(E); |
206 | 153k | } |
207 | | |
208 | | /// EmitAnyExpr - Emit code to compute the specified expression which |
209 | | /// can have any type. The result is returned as an RValue struct. |
210 | | /// If this is an aggregate expression, AggSlot indicates where the |
211 | | /// result should be returned. |
212 | | RValue CodeGenFunction::EmitAnyExpr(const Expr *E, |
213 | | AggValueSlot aggSlot, |
214 | 574k | bool ignoreResult) { |
215 | 574k | switch (getEvaluationKind(E->getType())) { |
216 | 562k | case TEK_Scalar: |
217 | 562k | return RValue::get(EmitScalarExpr(E, ignoreResult)); |
218 | 877 | case TEK_Complex: |
219 | 877 | return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult)); |
220 | 10.5k | case TEK_Aggregate: |
221 | 10.5k | if (!ignoreResult && aggSlot.isIgnored()) |
222 | 0 | aggSlot = CreateAggTemp(E->getType(), "agg-temp"); |
223 | 10.5k | EmitAggExpr(E, aggSlot); |
224 | 10.5k | return aggSlot.asRValue(); |
225 | 0 | } |
226 | 0 | llvm_unreachable("bad evaluation kind"); |
227 | 0 | } |
228 | | |
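// The three evaluation kinds dispatched above, summarized (illustrative):
//   TEK_Scalar    -> a single llvm::Value*       (RValue::get)
//   TEK_Complex   -> a (real, imag) value pair   (RValue::getComplex)
//   TEK_Aggregate -> a memory slot; a fresh "agg-temp" alloca is created
//                    when the caller passed an ignored slot but the result
//                    is still needed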
229 | | /// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will |
230 | | /// always be accessible even if no aggregate location is provided. |
231 | 288k | RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) { |
232 | 288k | AggValueSlot AggSlot = AggValueSlot::ignored(); |
233 | | |
234 | 288k | if (hasAggregateEvaluationKind(E->getType())) |
235 | 8.99k | AggSlot = CreateAggTemp(E->getType(), "agg.tmp"); |
236 | 288k | return EmitAnyExpr(E, AggSlot); |
237 | 288k | } |
238 | | |
239 | | /// EmitAnyExprToMem - Evaluate an expression into a given memory |
240 | | /// location. |
241 | | void CodeGenFunction::EmitAnyExprToMem(const Expr *E, |
242 | | Address Location, |
243 | | Qualifiers Quals, |
244 | 20.8k | bool IsInit) { |
245 | | // FIXME: This function should take an LValue as an argument. |
246 | 20.8k | switch (getEvaluationKind(E->getType())) { |
247 | 12 | case TEK_Complex: |
248 | 12 | EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()), |
249 | 12 | /*isInit*/ false); |
250 | 12 | return; |
251 | | |
252 | 9.62k | case TEK_Aggregate: { |
253 | 9.62k | EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals, |
254 | 9.62k | AggValueSlot::IsDestructed_t(IsInit), |
255 | 9.62k | AggValueSlot::DoesNotNeedGCBarriers, |
256 | 9.62k | AggValueSlot::IsAliased_t(!IsInit), |
257 | 9.62k | AggValueSlot::MayOverlap)); |
258 | 9.62k | return; |
259 | 0 | } |
260 | | |
261 | 11.1k | case TEK_Scalar: { |
262 | 11.1k | RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false)); |
263 | 11.1k | LValue LV = MakeAddrLValue(Location, E->getType()); |
264 | 11.1k | EmitStoreThroughLValue(RV, LV); |
265 | 11.1k | return; |
266 | 0 | } |
267 | 0 | } |
268 | 0 | llvm_unreachable("bad evaluation kind"); |
269 | 0 | } |
270 | | |
271 | | static void |
272 | | pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M, |
273 | 13.2k | const Expr *E, Address ReferenceTemporary) { |
274 | | // Objective-C++ ARC: |
275 | | // If we are binding a reference to a temporary that has ownership, we |
276 | | // need to perform retain/release operations on the temporary. |
277 | | // |
278 | | // FIXME: This should be looking at E, not M. |
279 | 13.2k | if (auto Lifetime = M->getType().getObjCLifetime()) { |
280 | 10 | switch (Lifetime) { |
281 | 0 | case Qualifiers::OCL_None: |
282 | 0 | case Qualifiers::OCL_ExplicitNone: |
283 | | // Carry on to normal cleanup handling. |
284 | 0 | break; |
285 | |
286 | 1 | case Qualifiers::OCL_Autoreleasing: |
287 | | // Nothing to do; cleaned up by an autorelease pool. |
288 | 1 | return; |
289 | |
290 | 8 | case Qualifiers::OCL_Strong: |
291 | 9 | case Qualifiers::OCL_Weak: |
292 | 9 | switch (StorageDuration Duration = M->getStorageDuration()) { |
293 | 2 | case SD_Static: |
294 | | // Note: we intentionally do not register a cleanup to release |
295 | | // the object on program termination. |
296 | 2 | return; |
297 | | |
298 | 0 | case SD_Thread: |
299 | | // FIXME: We should probably register a cleanup in this case. |
300 | 0 | return; |
301 | | |
302 | 4 | case SD_Automatic: |
303 | 7 | case SD_FullExpression: |
304 | 7 | CodeGenFunction::Destroyer *Destroy; |
305 | 7 | CleanupKind CleanupKind; |
306 | 7 | if (Lifetime == Qualifiers::OCL_Strong) { |
307 | 6 | const ValueDecl *VD = M->getExtendingDecl(); |
308 | 6 | bool Precise = |
309 | 6 | VD && isa<VarDecl>(VD) && VD->hasAttr<ObjCPreciseLifetimeAttr>(); |
310 | 6 | CleanupKind = CGF.getARCCleanupKind(); |
311 | 0 | Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise |
312 | 6 | : &CodeGenFunction::destroyARCStrongImprecise; |
313 | 1 | } else { |
314 | | // __weak objects always get EH cleanups; otherwise, exceptions |
315 | | // could cause really nasty crashes instead of mere leaks. |
316 | 1 | CleanupKind = NormalAndEHCleanup; |
317 | 1 | Destroy = &CodeGenFunction::destroyARCWeak; |
318 | 1 | } |
319 | 7 | if (Duration == SD_FullExpression) |
320 | 3 | CGF.pushDestroy(CleanupKind, ReferenceTemporary, |
321 | 3 | M->getType(), *Destroy, |
322 | 3 | CleanupKind & EHCleanup); |
323 | 4 | else |
324 | 4 | CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary, |
325 | 4 | M->getType(), |
326 | 4 | *Destroy, CleanupKind & EHCleanup); |
327 | 7 | return; |
328 | | |
329 | 0 | case SD_Dynamic: |
330 | 0 | llvm_unreachable("temporary cannot have dynamic storage duration"); |
331 | 0 | } |
332 | 0 | llvm_unreachable("unknown storage duration"); |
333 | 13.2k | } |
334 | 13.2k | } |
335 | | |
336 | 13.2k | CXXDestructorDecl *ReferenceTemporaryDtor = nullptr; |
337 | 13.2k | if (const RecordType *RT = |
338 | 8.60k | E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) { |
339 | | // Get the destructor for the reference temporary. |
340 | 8.60k | auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl()); |
341 | 8.60k | if (!ClassDecl->hasTrivialDestructor()) |
342 | 1.23k | ReferenceTemporaryDtor = ClassDecl->getDestructor(); |
343 | 8.60k | } |
344 | | |
345 | 13.2k | if (!ReferenceTemporaryDtor) |
346 | 11.9k | return; |
347 | | |
348 | | // Call the destructor for the temporary. |
349 | 1.23k | switch (M->getStorageDuration()) { |
350 | 56 | case SD_Static: |
351 | 64 | case SD_Thread: { |
352 | 64 | llvm::FunctionCallee CleanupFn; |
353 | 64 | llvm::Constant *CleanupArg; |
354 | 64 | if (E->getType()->isArrayType()) { |
355 | 4 | CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper( |
356 | 4 | ReferenceTemporary, E->getType(), |
357 | 4 | CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions, |
358 | 4 | dyn_cast_or_null<VarDecl>(M->getExtendingDecl())); |
359 | 4 | CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy); |
360 | 60 | } else { |
361 | 60 | CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor( |
362 | 60 | GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete)); |
363 | 60 | CleanupArg = cast<llvm::Constant>(ReferenceTemporary.getPointer()); |
364 | 60 | } |
365 | 64 | CGF.CGM.getCXXABI().registerGlobalDtor( |
366 | 64 | CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg); |
367 | 64 | break; |
368 | 56 | } |
369 | | |
370 | 1.05k | case SD_FullExpression: |
371 | 1.05k | CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(), |
372 | 1.05k | CodeGenFunction::destroyCXXObject, |
373 | 1.05k | CGF.getLangOpts().Exceptions); |
374 | 1.05k | break; |
375 | | |
376 | 111 | case SD_Automatic: |
377 | 111 | CGF.pushLifetimeExtendedDestroy(NormalAndEHCleanup, |
378 | 111 | ReferenceTemporary, E->getType(), |
379 | 111 | CodeGenFunction::destroyCXXObject, |
380 | 111 | CGF.getLangOpts().Exceptions); |
381 | 111 | break; |
382 | | |
383 | 0 | case SD_Dynamic: |
384 | 0 | llvm_unreachable("temporary cannot have dynamic storage duration"); |
385 | 1.23k | } |
386 | 1.23k | } |
387 | | |
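// Illustration (a hedged C++ example of the cases handled above): given
//   const std::string &r = std::string("x");
// the temporary has SD_Automatic storage duration, so its destructor is
// registered via pushLifetimeExtendedDestroy; an unextended temporary in a
// full-expression instead gets an ordinary NormalAndEHCleanup pushDestroy,
// and static/thread-duration temporaries go through registerGlobalDtor.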
388 | | static Address createReferenceTemporary(CodeGenFunction &CGF, |
389 | | const MaterializeTemporaryExpr *M, |
390 | | const Expr *Inner, |
391 | 13.2k | Address *Alloca = nullptr) { |
392 | 13.2k | auto &TCG = CGF.getTargetHooks(); |
393 | 13.2k | switch (M->getStorageDuration()) { |
394 | 12.8k | case SD_FullExpression: |
395 | 13.1k | case SD_Automatic: { |
396 | | // If we have a constant temporary array or record try to promote it into a |
397 | | // constant global under the same rules a normal constant would've been |
398 | | // promoted. This is easier on the optimizer and generally emits fewer |
399 | | // instructions. |
400 | 13.1k | QualType Ty = Inner->getType(); |
401 | 13.1k | if (CGF.CGM.getCodeGenOpts().MergeAllConstants && |
402 | 676 | (Ty->isArrayType() || Ty->isRecordType()) && |
403 | 362 | CGF.CGM.isTypeConstant(Ty, true)) |
404 | 25 | if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) { |
405 | 9 | if (auto AddrSpace = CGF.getTarget().getConstantAddressSpace()) { |
406 | 9 | auto AS = AddrSpace.getValue(); |
407 | 9 | auto *GV = new llvm::GlobalVariable( |
408 | 9 | CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true, |
409 | 9 | llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr, |
410 | 9 | llvm::GlobalValue::NotThreadLocal, |
411 | 9 | CGF.getContext().getTargetAddressSpace(AS)); |
412 | 9 | CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty); |
413 | 9 | GV->setAlignment(alignment.getAsAlign()); |
414 | 9 | llvm::Constant *C = GV; |
415 | 9 | if (AS != LangAS::Default) |
416 | 3 | C = TCG.performAddrSpaceCast( |
417 | 3 | CGF.CGM, GV, AS, LangAS::Default, |
418 | 3 | GV->getValueType()->getPointerTo( |
419 | 3 | CGF.getContext().getTargetAddressSpace(LangAS::Default))); |
420 | | // FIXME: Should we put the new global into a COMDAT? |
421 | 9 | return Address(C, alignment); |
422 | 9 | } |
423 | 13.1k | } |
424 | 13.1k | return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca); |
425 | 13.1k | } |
426 | 9 | case SD_Thread: |
427 | 107 | case SD_Static: |
428 | 107 | return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner); |
429 | | |
430 | 0 | case SD_Dynamic: |
431 | 0 | llvm_unreachable("temporary can't have dynamic storage duration"); |
432 | 0 | } |
433 | 0 | llvm_unreachable("unknown storage duration"); |
434 | 0 | } |
435 | | |
436 | | /// Helper method to check if the underlying ABI is AAPCS |
437 | 1.74k | static bool isAAPCS(const TargetInfo &TargetInfo) { |
438 | 1.74k | return TargetInfo.getABI().startswith("aapcs"); |
439 | 1.74k | } |
440 | | |
441 | | LValue CodeGenFunction:: |
442 | 13.2k | EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) { |
443 | 13.2k | const Expr *E = M->getSubExpr(); |
444 | | |
445 | 13.2k | assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) || |
446 | 13.2k | !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) && |
447 | 13.2k | "Reference should never be pseudo-strong!"); |
448 | | |
449 | | // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so |
450 | | // as that will cause the lifetime adjustment to be lost for ARC |
451 | 13.2k | auto ownership = M->getType().getObjCLifetime(); |
452 | 13.2k | if (ownership != Qualifiers::OCL_None && |
453 | 11 | ownership != Qualifiers::OCL_ExplicitNone) { |
454 | 11 | Address Object = createReferenceTemporary(*this, M, E); |
455 | 11 | if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) { |
456 | 3 | Object = Address(llvm::ConstantExpr::getBitCast(Var, |
457 | 3 | ConvertTypeForMem(E->getType()) |
458 | 3 | ->getPointerTo(Object.getAddressSpace())), |
459 | 3 | Object.getAlignment()); |
460 | | |
461 | | // createReferenceTemporary will promote the temporary to a global with a |
462 | | // constant initializer if it can. It can only do this to a value of |
463 | | // ARC-manageable type if the value is global and therefore "immune" to |
464 | | // ref-counting operations. Therefore we have no need to emit either a |
465 | | // dynamic initialization or a cleanup and we can just return the address |
466 | | // of the temporary. |
467 | 3 | if (Var->hasInitializer()) |
468 | 1 | return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl); |
469 | | |
470 | 2 | Var->setInitializer(CGM.EmitNullConstant(E->getType())); |
471 | 2 | } |
472 | 10 | LValue RefTempDst = MakeAddrLValue(Object, M->getType(), |
473 | 10 | AlignmentSource::Decl); |
474 | | |
475 | 10 | switch (getEvaluationKind(E->getType())) { |
476 | 0 | default: llvm_unreachable("expected scalar or aggregate expression"); |
477 | 6 | case TEK_Scalar: |
478 | 6 | EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false); |
479 | 6 | break; |
480 | 4 | case TEK_Aggregate: { |
481 | 4 | EmitAggExpr(E, AggValueSlot::forAddr(Object, |
482 | 4 | E->getType().getQualifiers(), |
483 | 4 | AggValueSlot::IsDestructed, |
484 | 4 | AggValueSlot::DoesNotNeedGCBarriers, |
485 | 4 | AggValueSlot::IsNotAliased, |
486 | 4 | AggValueSlot::DoesNotOverlap)); |
487 | 4 | break; |
488 | 10 | } |
489 | 10 | } |
490 | | |
491 | 10 | pushTemporaryCleanup(*this, M, E, Object); |
492 | 10 | return RefTempDst; |
493 | 10 | } |
494 | | |
495 | 13.2k | SmallVector<const Expr *, 2> CommaLHSs; |
496 | 13.2k | SmallVector<SubobjectAdjustment, 2> Adjustments; |
497 | 13.2k | E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments); |
498 | | |
499 | 13.2k | for (const auto &Ignored : CommaLHSs) |
500 | 0 | EmitIgnoredExpr(Ignored); |
501 | | |
502 | 13.2k | if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) { |
503 | 3 | if (opaque->getType()->isRecordType()) { |
504 | 3 | assert(Adjustments.empty()); |
505 | 3 | return EmitOpaqueValueLValue(opaque); |
506 | 3 | } |
507 | 13.2k | } |
508 | | |
509 | | // Create and initialize the reference temporary. |
510 | 13.2k | Address Alloca = Address::invalid(); |
511 | 13.2k | Address Object = createReferenceTemporary(*this, M, E, &Alloca); |
512 | 13.2k | if (auto *Var = dyn_cast<llvm::GlobalVariable>( |
513 | 113 | Object.getPointer()->stripPointerCasts())) { |
514 | 113 | Object = Address(llvm::ConstantExpr::getBitCast( |
515 | 113 | cast<llvm::Constant>(Object.getPointer()), |
516 | 113 | ConvertTypeForMem(E->getType())->getPointerTo()), |
517 | 113 | Object.getAlignment()); |
518 | | // If the temporary is a global and has a constant initializer or is a |
519 | | // constant temporary that we promoted to a global, we may have already |
520 | | // initialized it. |
521 | 113 | if (!Var->hasInitializer()) { |
522 | 97 | Var->setInitializer(CGM.EmitNullConstant(E->getType())); |
523 | 97 | EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true); |
524 | 97 | } |
525 | 13.1k | } else { |
526 | 13.1k | switch (M->getStorageDuration()) { |
527 | 218 | case SD_Automatic: |
528 | 218 | if (auto *Size = EmitLifetimeStart( |
529 | 22 | CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()), |
530 | 22 | Alloca.getPointer())) { |
531 | 22 | pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker, |
532 | 22 | Alloca, Size); |
533 | 22 | } |
534 | 218 | break; |
535 | | |
536 | 12.8k | case SD_FullExpression: { |
537 | 12.8k | if (!ShouldEmitLifetimeMarkers) |
538 | 12.3k | break; |
539 | | |
540 | | // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end |
541 | | // marker. Instead, start the lifetime of a conditional temporary earlier |
542 | | // so that it's unconditional. Don't do this with sanitizers which need |
543 | | // more precise lifetime marks. |
544 | 509 | ConditionalEvaluation *OldConditional = nullptr; |
545 | 509 | CGBuilderTy::InsertPoint OldIP; |
546 | 509 | if (isInConditionalBranch() && !E->getType().isDestructedType() && |
547 | 21 | !SanOpts.has(SanitizerKind::HWAddress) && |
548 | 20 | !SanOpts.has(SanitizerKind::Memory) && |
549 | 19 | !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) { |
550 | 18 | OldConditional = OutermostConditional; |
551 | 18 | OutermostConditional = nullptr; |
552 | | |
553 | 18 | OldIP = Builder.saveIP(); |
554 | 18 | llvm::BasicBlock *Block = OldConditional->getStartingBlock(); |
555 | 18 | Builder.restoreIP(CGBuilderTy::InsertPoint( |
556 | 18 | Block, llvm::BasicBlock::iterator(Block->back()))); |
557 | 18 | } |
558 | | |
559 | 509 | if (auto *Size = EmitLifetimeStart( |
560 | 509 | CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()), |
561 | 509 | Alloca.getPointer())) { |
562 | 509 | pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca, |
563 | 509 | Size); |
564 | 509 | } |
565 | | |
566 | 509 | if (OldConditional) { |
567 | 18 | OutermostConditional = OldConditional; |
568 | 18 | Builder.restoreIP(OldIP); |
569 | 18 | } |
570 | 509 | break; |
571 | 509 | } |
572 | | |
573 | 0 | default: |
574 | 0 | break; |
575 | 13.1k | } |
576 | 13.1k | EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true); |
577 | 13.1k | } |
578 | 13.2k | pushTemporaryCleanup(*this, M, E, Object); |
579 | | |
580 | | // Perform derived-to-base casts and/or field accesses, to get from the |
581 | | // temporary object we created (and, potentially, for which we extended |
582 | | // the lifetime) to the subobject we're binding the reference to. |
583 | 13.2k | for (unsigned I = Adjustments.size(); I != 0; --I) { |
584 | 1 | SubobjectAdjustment &Adjustment = Adjustments[I-1]; |
585 | 1 | switch (Adjustment.Kind) { |
586 | 0 | case SubobjectAdjustment::DerivedToBaseAdjustment: |
587 | 0 | Object = |
588 | 0 | GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass, |
589 | 0 | Adjustment.DerivedToBase.BasePath->path_begin(), |
590 | 0 | Adjustment.DerivedToBase.BasePath->path_end(), |
591 | 0 | /*NullCheckValue=*/ false, E->getExprLoc()); |
592 | 0 | break; |
593 | | |
594 | 1 | case SubobjectAdjustment::FieldAdjustment: { |
595 | 1 | LValue LV = MakeAddrLValue(Object, E->getType(), AlignmentSource::Decl); |
596 | 1 | LV = EmitLValueForField(LV, Adjustment.Field); |
597 | 1 | assert(LV.isSimple() && |
598 | 1 | "materialized temporary field is not a simple lvalue"); |
599 | 1 | Object = LV.getAddress(*this); |
600 | 1 | break; |
601 | 0 | } |
602 | | |
603 | 0 | case SubobjectAdjustment::MemberPointerAdjustment: { |
604 | 0 | llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS); |
605 | 0 | Object = EmitCXXMemberDataPointerAddress(E, Object, Ptr, |
606 | 0 | Adjustment.Ptr.MPT); |
607 | 0 | break; |
608 | 0 | } |
609 | 1 | } |
610 | 1 | } |
611 | | |
612 | 13.2k | return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl); |
613 | 13.2k | } |
614 | | |
615 | | RValue |
616 | 88.9k | CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) { |
617 | | // Emit the expression as an lvalue. |
618 | 88.9k | LValue LV = EmitLValue(E); |
619 | 88.9k | assert(LV.isSimple()); |
620 | 88.9k | llvm::Value *Value = LV.getPointer(*this); |
621 | | |
622 | 88.9k | if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) { |
623 | | // C++11 [dcl.ref]p5 (as amended by core issue 453): |
624 | | // If a glvalue to which a reference is directly bound designates neither |
625 | | // an existing object or function of an appropriate type nor a region of |
626 | | // storage of suitable size and alignment to contain an object of the |
627 | | // reference's type, the behavior is undefined. |
628 | 55 | QualType Ty = E->getType(); |
629 | 55 | EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty); |
630 | 55 | } |
631 | | |
632 | 88.9k | return RValue::get(Value); |
633 | 88.9k | } |
634 | | |
635 | | |
636 | | /// getAccessedFieldNo - Given an encoded value and a result number, return the |
637 | | /// input field number being accessed. |
638 | | unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx, |
639 | 385 | const llvm::Constant *Elts) { |
640 | 385 | return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx)) |
641 | 385 | ->getZExtValue(); |
642 | 385 | } |
643 | | |
644 | | /// Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h. |
645 | | static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low, |
646 | 49 | llvm::Value *High) { |
647 | 49 | llvm::Value *KMul = Builder.getInt64(0x9ddfea08eb382d69ULL); |
648 | 49 | llvm::Value *K47 = Builder.getInt64(47); |
649 | 49 | llvm::Value *A0 = Builder.CreateMul(Builder.CreateXor(Low, High), KMul); |
650 | 49 | llvm::Value *A1 = Builder.CreateXor(Builder.CreateLShr(A0, K47), A0); |
651 | 49 | llvm::Value *B0 = Builder.CreateMul(Builder.CreateXor(High, A1), KMul); |
652 | 49 | llvm::Value *B1 = Builder.CreateXor(Builder.CreateLShr(B0, K47), B0); |
653 | 49 | return Builder.CreateMul(B1, KMul); |
654 | 49 | } |
655 | | |
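// A standalone sketch of the value the IR above computes (assumed to mirror
// llvm::hashing::detail::hash_16_bytes; verify against ADT/Hashing.h):
//   uint64_t hash_16_bytes(uint64_t Low, uint64_t High) {
//     const uint64_t KMul = 0x9ddfea08eb382d69ULL;
//     uint64_t A = (Low ^ High) * KMul;
//     A ^= (A >> 47);
//     uint64_t B = (High ^ A) * KMul;
//     B ^= (B >> 47);
//     return B * KMul;
//   }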
656 | 717 | bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) { |
657 | 717 | return TCK == TCK_DowncastPointer || TCK == TCK_Upcast || |
658 | 688 | TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation; |
659 | 717 | } |
660 | | |
661 | 421 | bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) { |
662 | 421 | CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); |
663 | 421 | return (RD && RD->hasDefinition() && RD->isDynamicClass()) && |
664 | 80 | (TCK == TCK_MemberAccess || TCK == TCK_MemberCall || |
665 | 46 | TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference || |
666 | 35 | TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation); |
667 | 421 | } |
668 | | |
669 | 573k | bool CodeGenFunction::sanitizePerformTypeCheck() const { |
670 | 573k | return SanOpts.has(SanitizerKind::Null) | |
671 | 573k | SanOpts.has(SanitizerKind::Alignment) | |
672 | 573k | SanOpts.has(SanitizerKind::ObjectSize) | |
673 | 573k | SanOpts.has(SanitizerKind::Vptr); |
674 | 573k | } |
675 | | |
676 | | void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, |
677 | | llvm::Value *Ptr, QualType Ty, |
678 | | CharUnits Alignment, |
679 | | SanitizerSet SkippedChecks, |
680 | 463k | llvm::Value *ArraySize) { |
681 | 463k | if (!sanitizePerformTypeCheck()) |
682 | 462k | return; |
683 | | |
684 | | // Don't check pointers outside the default address space. The null check |
685 | | // isn't correct, the object-size check isn't supported by LLVM, and we can't |
686 | | // communicate the addresses to the runtime handler for the vptr check. |
687 | 720 | if (Ptr->getType()->getPointerAddressSpace()) |
688 | 2 | return; |
689 | | |
690 | | // Don't check pointers to volatile data. The behavior here is implementation- |
691 | | // defined. |
692 | 718 | if (Ty.isVolatileQualified()) |
693 | 1 | return; |
694 | | |
695 | 717 | SanitizerScope SanScope(this); |
696 | | |
697 | 717 | SmallVector<std::pair<llvm::Value *, SanitizerMask>, 3> Checks; |
698 | 717 | llvm::BasicBlock *Done = nullptr; |
699 | | |
700 | | // Quickly determine whether we have a pointer to an alloca. It's possible |
701 | | // to skip null checks, and some alignment checks, for these pointers. This |
702 | | // can reduce compile-time significantly. |
703 | 717 | auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts()); |
704 | | |
705 | 717 | llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext()); |
706 | 717 | llvm::Value *IsNonNull = nullptr; |
707 | 717 | bool IsGuaranteedNonNull = |
708 | 717 | SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca; |
709 | 717 | bool AllowNullPointers = isNullPointerAllowed(TCK); |
710 | 717 | if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) && |
711 | 354 | !IsGuaranteedNonNull) { |
712 | | // The glvalue must not be an empty glvalue. |
713 | 231 | IsNonNull = Builder.CreateIsNotNull(Ptr); |
714 | | |
715 | | // The IR builder can constant-fold the null check if the pointer points to |
716 | | // a constant. |
717 | 231 | IsGuaranteedNonNull = IsNonNull == True; |
718 | | |
719 | | // Skip the null check if the pointer is known to be non-null. |
720 | 231 | if (!IsGuaranteedNonNull) { |
721 | 225 | if (AllowNullPointers) { |
722 | | // When performing pointer casts, it's OK if the value is null. |
723 | | // Skip the remaining checks in that case. |
724 | 17 | Done = createBasicBlock("null"); |
725 | 17 | llvm::BasicBlock *Rest = createBasicBlock("not.null"); |
726 | 17 | Builder.CreateCondBr(IsNonNull, Rest, Done); |
727 | 17 | EmitBlock(Rest); |
728 | 208 | } else { |
729 | 208 | Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null)); |
730 | 208 | } |
731 | 225 | } |
732 | 231 | } |
733 | | |
734 | 717 | if (SanOpts.has(SanitizerKind::ObjectSize) && |
735 | 142 | !SkippedChecks.has(SanitizerKind::ObjectSize) && |
736 | 105 | !Ty->isIncompleteType()) { |
737 | 105 | uint64_t TySize = CGM.getMinimumObjectSize(Ty).getQuantity(); |
738 | 105 | llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize); |
739 | 105 | if (ArraySize) |
740 | 5 | Size = Builder.CreateMul(Size, ArraySize); |
741 | | |
742 | | // Degenerate case: new X[0] does not need an objectsize check. |
743 | 105 | llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size); |
744 | 105 | if (!ConstantSize || !ConstantSize->isNullValue()) { |
745 | | // The glvalue must refer to a large enough storage region. |
746 | | // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation |
747 | | // to check this. |
748 | | // FIXME: Get object address space |
749 | 103 | llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy }; |
750 | 103 | llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys); |
751 | 103 | llvm::Value *Min = Builder.getFalse(); |
752 | 103 | llvm::Value *NullIsUnknown = Builder.getFalse(); |
753 | 103 | llvm::Value *Dynamic = Builder.getFalse(); |
754 | 103 | llvm::Value *CastAddr = Builder.CreateBitCast(Ptr, Int8PtrTy); |
755 | 103 | llvm::Value *LargeEnough = Builder.CreateICmpUGE( |
756 | 103 | Builder.CreateCall(F, {CastAddr, Min, NullIsUnknown, Dynamic}), Size); |
757 | 103 | Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize)); |
758 | 103 | } |
759 | 105 | } |
760 | | |
761 | 717 | uint64_t AlignVal = 0; |
762 | 717 | llvm::Value *PtrAsInt = nullptr; |
763 | | |
764 | 717 | if (SanOpts.has(SanitizerKind::Alignment) && |
765 | 306 | !SkippedChecks.has(SanitizerKind::Alignment)) { |
766 | 268 | AlignVal = Alignment.getQuantity(); |
767 | 268 | if (!Ty->isIncompleteType() && !AlignVal) |
768 | 104 | AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr, |
769 | 104 | /*ForPointeeType=*/true) |
770 | 104 | .getQuantity(); |
771 | | |
772 | | // The glvalue must be suitably aligned. |
773 | 268 | if (AlignVal > 1 && |
774 | 185 | (!PtrToAlloca || PtrToAlloca->getAlignment() < AlignVal)) { |
775 | 162 | PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy); |
776 | 162 | llvm::Value *Align = Builder.CreateAnd( |
777 | 162 | PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal - 1)); |
778 | 162 | llvm::Value *Aligned = |
779 | 162 | Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0)); |
780 | 162 | if (Aligned != True) |
781 | 159 | Checks.push_back(std::make_pair(Aligned, SanitizerKind::Alignment)); |
782 | 162 | } |
783 | 268 | } |
784 | | |
785 | 717 | if (Checks.size() > 0) { |
786 | | // Make sure we're not losing information. Alignment needs to be a power of |
787 | | // 2 |
788 | 341 | assert(!AlignVal || (uint64_t)1 << llvm::Log2_64(AlignVal) == AlignVal); |
789 | 341 | llvm::Constant *StaticData[] = { |
790 | 341 | EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty), |
791 | 236 | llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2_64(AlignVal) : 1), |
792 | 341 | llvm::ConstantInt::get(Int8Ty, TCK)}; |
793 | 341 | EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData, |
794 | 182 | PtrAsInt ? PtrAsInt : Ptr); |
795 | 341 | } |
796 | | |
797 | | // If possible, check that the vptr indicates that there is a subobject of |
798 | | // type Ty at offset zero within this object. |
799 | | // |
800 | | // C++11 [basic.life]p5,6: |
801 | | // [For storage which does not refer to an object within its lifetime] |
802 | | // The program has undefined behavior if: |
803 | | // -- the [pointer or glvalue] is used to access a non-static data member |
804 | | // or call a non-static member function |
805 | 717 | if (SanOpts.has(SanitizerKind::Vptr) && |
806 | 421 | !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) { |
807 | | // Ensure that the pointer is non-null before loading it. If there is no |
808 | | // compile-time guarantee, reuse the run-time null check or emit a new one. |
809 | 50 | if (!IsGuaranteedNonNull) { |
810 | 22 | if (!IsNonNull) |
811 | 7 | IsNonNull = Builder.CreateIsNotNull(Ptr); |
812 | 22 | if (!Done) |
813 | 17 | Done = createBasicBlock("vptr.null"); |
814 | 22 | llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null"); |
815 | 22 | Builder.CreateCondBr(IsNonNull, VptrNotNull, Done); |
816 | 22 | EmitBlock(VptrNotNull); |
817 | 22 | } |
818 | | |
819 | | // Compute a hash of the mangled name of the type. |
820 | | // |
821 | | // FIXME: This is not guaranteed to be deterministic! Move to a |
822 | | // fingerprinting mechanism once LLVM provides one. For the time |
823 | | // being the implementation happens to be deterministic. |
824 | 50 | SmallString<64> MangledName; |
825 | 50 | llvm::raw_svector_ostream Out(MangledName); |
826 | 50 | CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(), |
827 | 50 | Out); |
828 | | |
829 | | // Blacklist based on the mangled type. |
830 | 50 | if (!CGM.getContext().getSanitizerBlacklist().isBlacklistedType( |
831 | 49 | SanitizerKind::Vptr, Out.str())) { |
832 | 49 | llvm::hash_code TypeHash = hash_value(Out.str()); |
833 | | |
834 | | // Load the vptr, and compute hash_16_bytes(TypeHash, vptr). |
835 | 49 | llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash); |
836 | 49 | llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0); |
837 | 49 | Address VPtrAddr(Builder.CreateBitCast(Ptr, VPtrTy), getPointerAlign()); |
838 | 49 | llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr); |
839 | 49 | llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty); |
840 | | |
841 | 49 | llvm::Value *Hash = emitHash16Bytes(Builder, Low, High); |
842 | 49 | Hash = Builder.CreateTrunc(Hash, IntPtrTy); |
843 | | |
844 | | // Look the hash up in our cache. |
845 | 49 | const int CacheSize = 128; |
846 | 49 | llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize); |
847 | 49 | llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable, |
848 | 49 | "__ubsan_vptr_type_cache"); |
849 | 49 | llvm::Value *Slot = Builder.CreateAnd(Hash, |
850 | 49 | llvm::ConstantInt::get(IntPtrTy, |
851 | 49 | CacheSize-1)); |
852 | 49 | llvm::Value *Indices[] = { Builder.getInt32(0), Slot }; |
853 | 49 | llvm::Value *CacheVal = |
854 | 49 | Builder.CreateAlignedLoad(Builder.CreateInBoundsGEP(Cache, Indices), |
855 | 49 | getPointerAlign()); |
856 | | |
857 | | // If the hash isn't in the cache, call a runtime handler to perform the |
858 | | // hard work of checking whether the vptr is for an object of the right |
859 | | // type. This will either fill in the cache and return, or produce a |
860 | | // diagnostic. |
861 | 49 | llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash); |
862 | 49 | llvm::Constant *StaticData[] = { |
863 | 49 | EmitCheckSourceLocation(Loc), |
864 | 49 | EmitCheckTypeDescriptor(Ty), |
865 | 49 | CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()), |
866 | 49 | llvm::ConstantInt::get(Int8Ty, TCK) |
867 | 49 | }; |
868 | 49 | llvm::Value *DynamicData[] = { Ptr, Hash }; |
869 | 49 | EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr), |
870 | 49 | SanitizerHandler::DynamicTypeCacheMiss, StaticData, |
871 | 49 | DynamicData); |
872 | 49 | } |
873 | 50 | } |
874 | | |
875 | 717 | if (Done) { |
876 | 34 | Builder.CreateBr(Done); |
877 | 34 | EmitBlock(Done); |
878 | 34 | } |
879 | 717 | } |
880 | | |
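// A rough runtime-level sketch of the vptr check emitted above (hypothetical
// C; __ubsan_vptr_type_cache is the global this code creates, and the miss
// handler is the assumed compiler-rt entry point for DynamicTypeCacheMiss):
//   uintptr_t Hash = hash_16_bytes(TypeHash, *(uintptr_t *)Ptr);
//   if (__ubsan_vptr_type_cache[Hash & (128 - 1)] != Hash)
//     __ubsan_handle_dynamic_type_cache_miss(&StaticData, Ptr, Hash);
// A cache hit means this dynamic type was already validated; a miss takes
// the slow path, which either fills the cache and returns or diagnoses.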
881 | | /// Determine whether this expression refers to a flexible array member in a |
882 | | /// struct. We disable array bounds checks for such members. |
883 | 31 | static bool isFlexibleArrayMemberExpr(const Expr *E) { |
884 | | // For compatibility with existing code, we treat arrays of length 0 or |
885 | | // 1 as flexible array members. |
886 | | // FIXME: This is inconsistent with the warning code in SemaChecking. Unify |
887 | | // the two mechanisms. |
888 | 31 | const ArrayType *AT = E->getType()->castAsArrayTypeUnsafe(); |
889 | 31 | if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) { |
890 | | // FIXME: Sema doesn't treat [1] as a flexible array member if the bound |
891 | | // was produced by macro expansion. |
892 | 28 | if (CAT->getSize().ugt(1)) |
893 | 17 | return false; |
894 | 3 | } else if (!isa<IncompleteArrayType>(AT)) |
895 | 2 | return false; |
896 | | |
897 | 12 | E = E->IgnoreParens(); |
898 | | |
899 | | // A flexible array member must be the last member in the class. |
900 | 12 | if (const auto *ME = dyn_cast<MemberExpr>(E)) { |
901 | | // FIXME: If the base type of the member expr is not FD->getParent(), |
902 | | // this should not be treated as a flexible array member access. |
903 | 5 | if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) { |
904 | | // FIXME: Sema doesn't treat a T[1] union member as a flexible array |
905 | | // member, only a T[0] or T[] member gets that treatment. |
906 | 5 | if (FD->getParent()->isUnion()) |
907 | 4 | return true; |
908 | 1 | RecordDecl::field_iterator FI( |
909 | 1 | DeclContext::decl_iterator(const_cast<FieldDecl *>(FD))); |
910 | 1 | return ++FI == FD->getParent()->field_end(); |
911 | 1 | } |
912 | 7 | } else if (const auto *IRE = dyn_cast<ObjCIvarRefExpr>(E)) { |
913 | 4 | return IRE->getDecl()->getNextIvar() == nullptr; |
914 | 4 | } |
915 | | |
916 | 3 | return false; |
917 | 3 | } |
918 | | |
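// Examples the predicate accepts (illustrative): both trailing members
// below are treated as flexible arrays, so array-bounds checks on them are
// suppressed:
//   struct S { int n; int tail[]; };   // true flexible array member
//   struct T { int n; int tail[1]; };  // length-0/1 tail, for legacy code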
919 | | llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E, |
920 | 16 | QualType EltTy) { |
921 | 16 | ASTContext &C = getContext(); |
922 | 16 | uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity(); |
923 | 16 | if (!EltSize) |
924 | 1 | return nullptr; |
925 | | |
926 | 15 | auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts()); |
927 | 15 | if (!ArrayDeclRef) |
928 | 7 | return nullptr; |
929 | | |
930 | 8 | auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl()); |
931 | 8 | if (!ParamDecl) |
932 | 2 | return nullptr; |
933 | | |
934 | 6 | auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>(); |
935 | 6 | if (!POSAttr) |
936 | 2 | return nullptr; |
937 | | |
938 | | // Don't load the size if it's a lower bound. |
939 | 4 | int POSType = POSAttr->getType(); |
940 | 4 | if (POSType != 0 && POSType != 1) |
941 | 2 | return nullptr; |
942 | | |
943 | | // Find the implicit size parameter. |
944 | 2 | auto PassedSizeIt = SizeArguments.find(ParamDecl); |
945 | 2 | if (PassedSizeIt == SizeArguments.end()) |
946 | 0 | return nullptr; |
947 | | |
948 | 2 | const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second; |
949 | 2 | assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable"); |
950 | 2 | Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second; |
951 | 2 | llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false, |
952 | 2 | C.getSizeType(), E->getExprLoc()); |
953 | 2 | llvm::Value *SizeOfElement = |
954 | 2 | llvm::ConstantInt::get(SizeInBytes->getType(), EltSize); |
955 | 2 | return Builder.CreateUDiv(SizeInBytes, SizeOfElement); |
956 | 2 | } |
957 | | |
958 | | /// If Base is known to point to the start of an array, return the length of |
959 | | /// that array. Return 0 if the length cannot be determined. |
960 | | static llvm::Value *getArrayIndexingBound( |
961 | 40 | CodeGenFunction &CGF, const Expr *Base, QualType &IndexedType) { |
962 | | // For the vector indexing extension, the bound is the number of elements. |
963 | 40 | if (const VectorType *VT = Base->getType()->getAs<VectorType>()) { |
964 | 1 | IndexedType = Base->getType(); |
965 | 1 | return CGF.Builder.getInt32(VT->getNumElements()); |
966 | 1 | } |
967 | | |
968 | 39 | Base = Base->IgnoreParens(); |
969 | | |
970 | 39 | if (const auto *CE = dyn_cast<CastExpr>(Base)) { |
971 | 39 | if (CE->getCastKind() == CK_ArrayToPointerDecay && |
972 | 31 | !isFlexibleArrayMemberExpr(CE->getSubExpr())) { |
973 | 24 | IndexedType = CE->getSubExpr()->getType(); |
974 | 24 | const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe(); |
975 | 24 | if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) |
976 | 21 | return CGF.Builder.getInt(CAT->getSize()); |
977 | 3 | else if (const auto *VAT = dyn_cast<VariableArrayType>(AT)) |
978 | 2 | return CGF.getVLASize(VAT).NumElts; |
979 | | // Ignore pass_object_size here. It's not applicable on decayed pointers. |
980 | 24 | } |
981 | 39 | } |
982 | | |
983 | 16 | QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0}; |
984 | 16 | if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) { |
985 | 2 | IndexedType = Base->getType(); |
986 | 2 | return POS; |
987 | 2 | } |
988 | | |
989 | 14 | return nullptr; |
990 | 14 | } |
991 | | |
992 | | void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base, |
993 | | llvm::Value *Index, QualType IndexType, |
994 | 40 | bool Accessed) { |
995 | 40 | assert(SanOpts.has(SanitizerKind::ArrayBounds) && |
996 | 40 | "should not be called unless adding bounds checks"); |
997 | 40 | SanitizerScope SanScope(this); |
998 | | |
999 | 40 | QualType IndexedType; |
1000 | 40 | llvm::Value *Bound = getArrayIndexingBound(*this, Base, IndexedType); |
1001 | 40 | if (!Bound) |
1002 | 14 | return; |
1003 | | |
1004 | 26 | bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType(); |
1005 | 26 | llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned); |
1006 | 26 | llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false); |
1007 | | |
1008 | 26 | llvm::Constant *StaticData[] = { |
1009 | 26 | EmitCheckSourceLocation(E->getExprLoc()), |
1010 | 26 | EmitCheckTypeDescriptor(IndexedType), |
1011 | 26 | EmitCheckTypeDescriptor(IndexType) |
1012 | 26 | }; |
1013 | 23 | llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal) |
1014 | 3 | : Builder.CreateICmpULE(IndexVal, BoundVal); |
1015 | 26 | EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds), |
1016 | 26 | SanitizerHandler::OutOfBounds, StaticData, Index); |
1017 | 26 | } |
1018 | | |
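// Illustration (hedged): with -fsanitize=array-bounds, for
//   int a[10]; a[i] = 0;
// the bound 10 comes from the ConstantArrayType, and the emitted guard is a
// single unsigned compare, roughly:
//   if (!((size_t)i < 10)) <report via the out-of-bounds handler>;
// Accessed=false (e.g. merely forming &a[i]) relaxes the compare to <=,
// since a one-past-the-end pointer is valid.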
1019 | | |
1020 | | CodeGenFunction::ComplexPairTy CodeGenFunction:: |
1021 | | EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV, |
1022 | 8 | bool isInc, bool isPre) { |
1023 | 8 | ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc()); |
1024 | | |
1025 | 8 | llvm::Value *NextVal; |
1026 | 8 | if (isa<llvm::IntegerType>(InVal.first->getType())) { |
1027 | 2 | uint64_t AmountVal = isInc ? 1 : -1; |
1028 | 4 | NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true); |
1029 | | |
1030 | | // Add the inc/dec to the real part. |
1031 | 2 | NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec"); |
1032 | 4 | } else { |
1033 | 4 | QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType(); |
1034 | 4 | llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1); |
1035 | 4 | if (!isInc) |
1036 | 2 | FVal.changeSign(); |
1037 | 4 | NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal); |
1038 | | |
1039 | | // Add the inc/dec to the real part. |
1040 | 2 | NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec"); |
1041 | 4 | } |
1042 | | |
1043 | 8 | ComplexPairTy IncVal(NextVal, InVal.second); |
1044 | | |
1045 | | // Store the updated result through the lvalue. |
1046 | 8 | EmitStoreOfComplex(IncVal, LV, /*init*/ false); |
1047 | 8 | if (getLangOpts().OpenMP) |
1048 | 0 | CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this, |
1049 | 0 | E->getSubExpr()); |
1050 | | |
1051 | | // If this is a postinc, return the value read from memory, otherwise use the |
1052 | | // updated value. |
1053 | 4 | return isPre ? IncVal : InVal; |
1054 | 8 | } |
1055 | | |
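// Illustration: for `_Complex double z; ++z;` only the real part changes
// (NextVal = real + 1.0); the imaginary part is carried through untouched,
// matching the usual C complex increment semantics. Pre-increment returns
// the updated pair, post-increment the pair loaded before the store.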
1056 | | void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E, |
1057 | 129k | CodeGenFunction *CGF) { |
1058 | | // Bind VLAs in the cast type. |
1059 | 129k | if (CGF && E->getType()->isVariablyModifiedType()) |
1060 | 13 | CGF->EmitVariablyModifiedType(E->getType()); |
1061 | | |
1062 | 129k | if (CGDebugInfo *DI = getModuleDebugInfo()) |
1063 | 39.6k | DI->EmitExplicitCastType(E->getType()); |
1064 | 129k | } |
1065 | | |
1066 | | //===----------------------------------------------------------------------===// |
1067 | | // LValue Expression Emission |
1068 | | //===----------------------------------------------------------------------===// |
1069 | | |
1070 | | /// EmitPointerWithAlignment - Given an expression of pointer type, try to |
1071 | | /// derive a more accurate bound on the alignment of the pointer. |
1072 | | Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E, |
1073 | | LValueBaseInfo *BaseInfo, |
1074 | 213k | TBAAAccessInfo *TBAAInfo) { |
1075 | | // We allow this with ObjC object pointers because of fragile ABIs. |
1076 | 213k | assert(E->getType()->isPointerType() || |
1077 | 213k | E->getType()->isObjCObjectPointerType()); |
1078 | 213k | E = E->IgnoreParens(); |
1079 | | |
1080 | | // Casts: |
1081 | 213k | if (const CastExpr *CE = dyn_cast<CastExpr>(E)) { |
1082 | 136k | if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE)) |
1083 | 3.25k | CGM.EmitExplicitCastExprType(ECE, this); |
1084 | | |
1085 | 136k | switch (CE->getCastKind()) { |
1086 | | // Non-converting casts (but not C's implicit conversion from void*). |
1087 | 2.14k | case CK_BitCast: |
1088 | 9.96k | case CK_NoOp: |
1089 | 9.97k | case CK_AddressSpaceConversion: |
1090 | 9.97k | if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) { |
1091 | 9.97k | if (PtrTy->getPointeeType()->isVoidType()) |
1092 | 849 | break; |
1093 | | |
1094 | 9.12k | LValueBaseInfo InnerBaseInfo; |
1095 | 9.12k | TBAAAccessInfo InnerTBAAInfo; |
1096 | 9.12k | Address Addr = EmitPointerWithAlignment(CE->getSubExpr(), |
1097 | 9.12k | &InnerBaseInfo, |
1098 | 9.12k | &InnerTBAAInfo); |
1099 | 9.12k | if (BaseInfo) *BaseInfo = InnerBaseInfo; |
1100 | 9.12k | if (TBAAInfo) *TBAAInfo = InnerTBAAInfo; |
1101 | | |
1102 | 9.12k | if (isa<ExplicitCastExpr>(CE)) { |
1103 | 2.37k | LValueBaseInfo TargetTypeBaseInfo; |
1104 | 2.37k | TBAAAccessInfo TargetTypeTBAAInfo; |
1105 | 2.37k | CharUnits Align = CGM.getNaturalPointeeTypeAlignment( |
1106 | 2.37k | E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo); |
1107 | 2.37k | if (TBAAInfo) |
1108 | 2.27k | *TBAAInfo = CGM.mergeTBAAInfoForCast(*TBAAInfo, |
1109 | 2.27k | TargetTypeTBAAInfo); |
1110 | | // If the source l-value is opaque, honor the alignment of the |
1111 | | // casted-to type. |
1112 | 2.37k | if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) { |
1113 | 1.88k | if (BaseInfo) |
1114 | 1.85k | BaseInfo->mergeForCast(TargetTypeBaseInfo); |
1115 | 1.88k | Addr = Address(Addr.getPointer(), Align); |
1116 | 1.88k | } |
1117 | 2.37k | } |
1118 | | |
1119 | 9.12k | if (SanOpts.has(SanitizerKind::CFIUnrelatedCast) && |
1120 | 4 | CE->getCastKind() == CK_BitCast) { |
1121 | 2 | if (auto PT = E->getType()->getAs<PointerType>()) |
1122 | 2 | EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr.getPointer(), |
1123 | 2 | /*MayBeNull=*/true, |
1124 | 2 | CodeGenFunction::CFITCK_UnrelatedCast, |
1125 | 2 | CE->getBeginLoc()); |
1126 | 2 | } |
1127 | 9.12k | return CE->getCastKind() != CK_AddressSpaceConversion |
1128 | 9.11k | ? Builder.CreateBitCast(Addr, ConvertType(E->getType())) |
1129 | 5 | : Builder.CreateAddrSpaceCast(Addr, |
1130 | 5 | ConvertType(E->getType())); |
1131 | 9.12k | } |
1132 | 4 | break; |
1133 | | |
1134 | | // Array-to-pointer decay. |
1135 | 4.78k | case CK_ArrayToPointerDecay: |
1136 | 4.78k | return EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo); |
1137 | | |
1138 | | // Derived-to-base conversions. |
1139 | 9.92k | case CK_UncheckedDerivedToBase: |
1140 | 11.0k | case CK_DerivedToBase: { |
1141 | | // TODO: Support accesses to members of base classes in TBAA. For now, we |
1142 | | // conservatively pretend that the complete object is of the base class |
1143 | | // type. |
1144 | 11.0k | if (TBAAInfo) |
1145 | 9.71k | *TBAAInfo = CGM.getTBAAAccessInfo(E->getType()); |
1146 | 11.0k | Address Addr = EmitPointerWithAlignment(CE->getSubExpr(), BaseInfo); |
1147 | 11.0k | auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl(); |
1148 | 11.0k | return GetAddressOfBaseClass(Addr, Derived, |
1149 | 11.0k | CE->path_begin(), CE->path_end(), |
1150 | 11.0k | ShouldNullCheckClassCastValue(CE), |
1151 | 11.0k | CE->getExprLoc()); |
1152 | 9.92k | } |
1153 | | |
1154 | | // TODO: Is there any reason to treat base-to-derived conversions |
1155 | | // specially? |
1156 | 110k | default: |
1157 | 110k | break; |
1158 | 188k | } |
1159 | 188k | } |
1160 | | |
1161 | | // Unary &. |
1162 | 188k | if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { |
1163 | 3.44k | if (UO->getOpcode() == UO_AddrOf) { |
1164 | 3.33k | LValue LV = EmitLValue(UO->getSubExpr()); |
1165 | 3.33k | if (BaseInfo) *BaseInfo = LV.getBaseInfo(); |
1166 | 3.33k | if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo(); |
1167 | 3.33k | return LV.getAddress(*this); |
1168 | 3.33k | } |
1169 | 185k | } |
1170 | | |
1171 | | // TODO: conditional operators, comma. |
1172 | | |
1173 | | // Otherwise, use the alignment of the type. |
1174 | 185k | CharUnits Align = |
1175 | 185k | CGM.getNaturalPointeeTypeAlignment(E->getType(), BaseInfo, TBAAInfo); |
1176 | 185k | return Address(EmitScalarExpr(E), Align); |
1177 | 185k | } |
1178 | | |
1179 | 34 | llvm::Value *CodeGenFunction::EmitNonNullRValueCheck(RValue RV, QualType T) { |
1180 | 34 | llvm::Value *V = RV.getScalarVal(); |
1181 | 34 | if (auto MPT = T->getAs<MemberPointerType>()) |
1182 | 4 | return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT); |
1183 | 30 | return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType())); |
1184 | 30 | } |
1185 | | |
1186 | 127k | RValue CodeGenFunction::GetUndefRValue(QualType Ty) { |
1187 | 127k | if (Ty->isVoidType()) |
1188 | 126k | return RValue::get(nullptr); |
1189 | | |
1190 | 402 | switch (getEvaluationKind(Ty)) { |
1191 | 0 | case TEK_Complex: { |
1192 | 0 | llvm::Type *EltTy = |
1193 | 0 | ConvertType(Ty->castAs<ComplexType>()->getElementType()); |
1194 | 0 | llvm::Value *U = llvm::UndefValue::get(EltTy); |
1195 | 0 | return RValue::getComplex(std::make_pair(U, U)); |
1196 | 0 | } |
1197 | | |
1198 | | // If this is a use of an undefined aggregate type, the aggregate must have an |
1199 | | // identifiable address. Just because the contents of the value are undefined |
1200 | | // doesn't mean that the address can't be taken and compared. |
1201 | 400 | case TEK_Aggregate: { |
1202 | 400 | Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp"); |
1203 | 400 | return RValue::getAggregate(DestPtr); |
1204 | 0 | } |
1205 | | |
1206 | 2 | case TEK_Scalar: |
1207 | 2 | return RValue::get(llvm::UndefValue::get(ConvertType(Ty))); |
1208 | 0 | } |
1209 | 0 | llvm_unreachable("bad evaluation kind"); |
1210 | 0 | } |
1211 | | |
1212 | | RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E, |
1213 | 0 | const char *Name) { |
1214 | 0 | ErrorUnsupported(E, Name); |
1215 | 0 | return GetUndefRValue(E->getType()); |
1216 | 0 | } |
1217 | | |
1218 | | LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E, |
1219 | 0 | const char *Name) { |
1220 | 0 | ErrorUnsupported(E, Name); |
1221 | 0 | llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType())); |
1222 | 0 | return MakeAddrLValue(Address(llvm::UndefValue::get(Ty), CharUnits::One()), |
1223 | 0 | E->getType()); |
1224 | 0 | } |
1225 | | |
1226 | 251k | bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) { |
1227 | 251k | const Expr *Base = Obj; |
1228 | 382k | while (!isa<CXXThisExpr>(Base)) { |
1229 | | // The result of a dynamic_cast can be null. |
1230 | 296k | if (isa<CXXDynamicCastExpr>(Base)) |
1231 | 3 | return false; |
1232 | | |
1233 | 296k | if (const auto *CE = dyn_cast<CastExpr>(Base)) { |
1234 | 130k | Base = CE->getSubExpr(); |
1235 | 166k | } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) { |
1236 | 875 | Base = PE->getSubExpr(); |
1237 | 165k | } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) { |
1238 | 4.73k | if (UO->getOpcode() == UO_Extension) |
1239 | 6 | Base = UO->getSubExpr(); |
1240 | 4.72k | else |
1241 | 4.72k | return false; |
1242 | 160k | } else { |
1243 | 160k | return false; |
1244 | 160k | } |
1245 | 296k | } |
1246 | 86.6k | return true; |
1247 | 251k | } |
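 | |
 | | // Illustration (hypothetical, not part of CGExpr.cpp): member accesses whose
 | | // base IsWrappedCXXThis recognizes as 'this' behind parens, casts, or
 | | // __extension__, letting EmitCheckedLValue below skip null/alignment checks.
 | | struct WrappedThisDemo {
 | |   int f;
 | |   int viaThis() { return this->f; }                       // CXXThisExpr base
 | |   int viaParens() { return (this)->f; }                   // ParenExpr wrapper
 | |   int viaCast() { return ((WrappedThisDemo *)this)->f; }  // CastExpr wrapper
 | | };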
1248 | | |
1249 | 1.03M | LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) { |
1250 | 1.03M | LValue LV; |
1251 | 1.03M | if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E)207 ) |
1252 | 34 | LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true); |
1253 | 1.03M | else |
1254 | 1.03M | LV = EmitLValue(E); |
1255 | 1.03M | if (!isa<DeclRefExpr>(E) && !LV.isBitField()177k && LV.isSimple()175k ) { |
1256 | 175k | SanitizerSet SkippedChecks; |
1257 | 175k | if (const auto *ME = dyn_cast<MemberExpr>(E)) { |
1258 | 94.7k | bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase()); |
1259 | 94.7k | if (IsBaseCXXThis) |
1260 | 20.2k | SkippedChecks.set(SanitizerKind::Alignment, true); |
1261 | 94.7k | if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase())74.5k ) |
1262 | 28.9k | SkippedChecks.set(SanitizerKind::Null, true); |
1263 | 94.7k | } |
1264 | 175k | EmitTypeCheck(TCK, E->getExprLoc(), LV.getPointer(*this), E->getType(), |
1265 | 175k | LV.getAlignment(), SkippedChecks); |
1266 | 175k | } |
1267 | 1.03M | return LV; |
1268 | 1.03M | } |
1269 | | |
1270 | | /// EmitLValue - Emit code to compute a designator that specifies the location |
1271 | | /// of the expression. |
1272 | | /// |
1273 | | /// This can return one of two things: a simple address or a bitfield reference. |
1274 | | /// In either case, the LLVM Value* in the LValue structure is guaranteed to be |
1275 | | /// an LLVM pointer type. |
1276 | | /// |
1277 | | /// If this returns a bitfield reference, nothing about the pointee type of the |
1278 | | /// LLVM value is known: For example, it may not be a pointer to an integer. |
1279 | | /// |
1280 | | /// If this returns a normal address, and if the lvalue's C type is fixed size, |
1281 | | /// this method guarantees that the returned pointer type will point to an LLVM |
1282 | | /// type of the same size as the lvalue's type. If the lvalue has a variable
1283 | | /// length type, this is not possible. |
1284 | | /// |
1285 | 1.68M | LValue CodeGenFunction::EmitLValue(const Expr *E) { |
1286 | 1.68M | ApplyDebugLocation DL(*this, E); |
1287 | 1.68M | switch (E->getStmtClass()) { |
1288 | 0 | default: return EmitUnsupportedLValue(E, "l-value expression"); |
1289 | | |
1290 | 0 | case Expr::ObjCPropertyRefExprClass: |
1291 | 0 | llvm_unreachable("cannot emit a property reference directly"); |
1292 | | |
1293 | 1 | case Expr::ObjCSelectorExprClass: |
1294 | 1 | return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E)); |
1295 | 6 | case Expr::ObjCIsaExprClass: |
1296 | 6 | return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E)); |
1297 | 116k | case Expr::BinaryOperatorClass: |
1298 | 116k | return EmitBinaryOperatorLValue(cast<BinaryOperator>(E)); |
1299 | 19.3k | case Expr::CompoundAssignOperatorClass: { |
1300 | 19.3k | QualType Ty = E->getType(); |
1301 | 19.3k | if (const AtomicType *AT = Ty->getAs<AtomicType>()) |
1302 | 0 | Ty = AT->getValueType(); |
1303 | 19.3k | if (!Ty->isAnyComplexType()) |
1304 | 19.2k | return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E)); |
1305 | 64 | return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E)); |
1306 | 64 | } |
1307 | 16.8k | case Expr::CallExprClass: |
1308 | 39.2k | case Expr::CXXMemberCallExprClass: |
1309 | 44.1k | case Expr::CXXOperatorCallExprClass: |
1310 | 44.1k | case Expr::UserDefinedLiteralClass: |
1311 | 44.1k | return EmitCallExprLValue(cast<CallExpr>(E)); |
1312 | 0 | case Expr::CXXRewrittenBinaryOperatorClass: |
1313 | 0 | return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm()); |
1314 | 11 | case Expr::VAArgExprClass: |
1315 | 11 | return EmitVAArgExprLValue(cast<VAArgExpr>(E)); |
1316 | 1.16M | case Expr::DeclRefExprClass: |
1317 | 1.16M | return EmitDeclRefLValue(cast<DeclRefExpr>(E)); |
1318 | 4 | case Expr::ConstantExprClass: { |
1319 | 4 | const ConstantExpr *CE = cast<ConstantExpr>(E); |
1320 | 4 | if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) { |
1321 | 4 | QualType RetType = cast<CallExpr>(CE->getSubExpr()->IgnoreImplicit()) |
1322 | 4 | ->getCallReturnType(getContext()); |
1323 | 4 | return MakeNaturalAlignAddrLValue(Result, RetType); |
1324 | 4 | } |
1325 | 0 | return EmitLValue(cast<ConstantExpr>(E)->getSubExpr()); |
1326 | 0 | } |
1327 | 2.13k | case Expr::ParenExprClass: |
1328 | 2.13k | return EmitLValue(cast<ParenExpr>(E)->getSubExpr()); |
1329 | 0 | case Expr::GenericSelectionExprClass: |
1330 | 0 | return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr()); |
1331 | 514 | case Expr::PredefinedExprClass: |
1332 | 514 | return EmitPredefinedLValue(cast<PredefinedExpr>(E)); |
1333 | 49.5k | case Expr::StringLiteralClass: |
1334 | 49.5k | return EmitStringLiteralLValue(cast<StringLiteral>(E)); |
1335 | 16 | case Expr::ObjCEncodeExprClass: |
1336 | 16 | return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E)); |
1337 | 15 | case Expr::PseudoObjectExprClass: |
1338 | 15 | return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E)); |
1339 | 6 | case Expr::InitListExprClass: |
1340 | 6 | return EmitInitListLValue(cast<InitListExpr>(E)); |
1341 | 1 | case Expr::CXXTemporaryObjectExprClass: |
1342 | 1 | case Expr::CXXConstructExprClass: |
1343 | 1 | return EmitCXXConstructLValue(cast<CXXConstructExpr>(E)); |
1344 | 3 | case Expr::CXXBindTemporaryExprClass: |
1345 | 3 | return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E)); |
1346 | 23 | case Expr::CXXUuidofExprClass: |
1347 | 23 | return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E)); |
1348 | 4 | case Expr::LambdaExprClass: |
1349 | 4 | return EmitAggExprToLValue(E); |
1350 | | |
1351 | 1.19k | case Expr::ExprWithCleanupsClass: { |
1352 | 1.19k | const auto *cleanups = cast<ExprWithCleanups>(E); |
1353 | 1.19k | RunCleanupsScope Scope(*this); |
1354 | 1.19k | LValue LV = EmitLValue(cleanups->getSubExpr()); |
1355 | 1.19k | if (LV.isSimple()) { |
1356 | | // Defend against branches out of GNU statement expressions surrounded by
1357 | | // cleanups. |
1358 | 1.19k | llvm::Value *V = LV.getPointer(*this); |
1359 | 1.19k | Scope.ForceCleanup({&V}); |
1360 | 1.19k | return LValue::MakeAddr(Address(V, LV.getAlignment()), LV.getType(), |
1361 | 1.19k | getContext(), LV.getBaseInfo(), LV.getTBAAInfo()); |
1362 | 1.19k | } |
1363 | | // FIXME: Is it possible to create an ExprWithCleanups that produces a |
1364 | | // bitfield lvalue or some other non-simple lvalue? |
1365 | 0 | return LV; |
1366 | 0 | } |
1367 | |
1368 | 82 | case Expr::CXXDefaultArgExprClass: { |
1369 | 82 | auto *DAE = cast<CXXDefaultArgExpr>(E); |
1370 | 82 | CXXDefaultArgExprScope Scope(*this, DAE); |
1371 | 82 | return EmitLValue(DAE->getExpr()); |
1372 | 0 | } |
1373 | 14 | case Expr::CXXDefaultInitExprClass: { |
1374 | 14 | auto *DIE = cast<CXXDefaultInitExpr>(E); |
1375 | 14 | CXXDefaultInitExprScope Scope(*this, DIE); |
1376 | 14 | return EmitLValue(DIE->getExpr()); |
1377 | 0 | } |
1378 | 338 | case Expr::CXXTypeidExprClass: |
1379 | 338 | return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E)); |
1380 | |
1381 | 20 | case Expr::ObjCMessageExprClass: |
1382 | 20 | return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E)); |
1383 | 1.53k | case Expr::ObjCIvarRefExprClass: |
1384 | 1.53k | return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E)); |
1385 | 1 | case Expr::StmtExprClass: |
1386 | 1 | return EmitStmtExprLValue(cast<StmtExpr>(E)); |
1387 | 50.6k | case Expr::UnaryOperatorClass: |
1388 | 50.6k | return EmitUnaryOpLValue(cast<UnaryOperator>(E)); |
1389 | 45.4k | case Expr::ArraySubscriptExprClass: |
1390 | 45.4k | return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E)); |
1391 | 17 | case Expr::MatrixSubscriptExprClass: |
1392 | 17 | return EmitMatrixSubscriptExpr(cast<MatrixSubscriptExpr>(E)); |
1393 | 1.58k | case Expr::OMPArraySectionExprClass: |
1394 | 1.58k | return EmitOMPArraySectionExpr(cast<OMPArraySectionExpr>(E)); |
1395 | 284 | case Expr::ExtVectorElementExprClass: |
1396 | 284 | return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E)); |
1397 | 129k | case Expr::MemberExprClass: |
1398 | 129k | return EmitMemberExpr(cast<MemberExpr>(E)); |
1399 | 2.00k | case Expr::CompoundLiteralExprClass: |
1400 | 2.00k | return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E)); |
1401 | 499 | case Expr::ConditionalOperatorClass: |
1402 | 499 | return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E)); |
1403 | 6 | case Expr::BinaryConditionalOperatorClass: |
1404 | 6 | return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E)); |
1405 | 2 | case Expr::ChooseExprClass: |
1406 | 2 | return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr()); |
1407 | 585 | case Expr::OpaqueValueExprClass: |
1408 | 585 | return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E)); |
1409 | 8 | case Expr::SubstNonTypeTemplateParmExprClass: |
1410 | 8 | return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement()); |
1411 | 28.1k | case Expr::ImplicitCastExprClass: |
1412 | 28.2k | case Expr::CStyleCastExprClass: |
1413 | 28.2k | case Expr::CXXFunctionalCastExprClass: |
1414 | 39.1k | case Expr::CXXStaticCastExprClass: |
1415 | 39.2k | case Expr::CXXDynamicCastExprClass: |
1416 | 39.2k | case Expr::CXXReinterpretCastExprClass: |
1417 | 39.3k | case Expr::CXXConstCastExprClass: |
1418 | 39.3k | case Expr::CXXAddrspaceCastExprClass: |
1419 | 39.3k | case Expr::ObjCBridgedCastExprClass: |
1420 | 39.3k | return EmitCastLValue(cast<CastExpr>(E)); |
1421 | | |
1422 | 13.2k | case Expr::MaterializeTemporaryExprClass: |
1423 | 13.2k | return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E)); |
1424 | | |
1425 | 2 | case Expr::CoawaitExprClass: |
1426 | 2 | return EmitCoawaitLValue(cast<CoawaitExpr>(E)); |
1427 | 1 | case Expr::CoyieldExprClass: |
1428 | 1 | return EmitCoyieldLValue(cast<CoyieldExpr>(E)); |
1429 | 1.68M | } |
1430 | 1.68M | } |
1431 | | |
1432 | | /// Given an object of the given canonical type, can we safely copy a |
1433 | | /// value out of it based on its initializer? |
1434 | 423k | static bool isConstantEmittableObjectType(QualType type) { |
1435 | 423k | assert(type.isCanonical()); |
1436 | 423k | assert(!type->isReferenceType()); |
1437 | | |
1438 | | // Must be const-qualified but non-volatile. |
1439 | 423k | Qualifiers qs = type.getLocalQualifiers(); |
1440 | 423k | if (!qs.hasConst() || qs.hasVolatile()25.0k ) return false398k ; |
1441 | | |
1442 | | // Otherwise, all object types satisfy this except C++ classes with |
1443 | | // mutable subobjects or non-trivial copy/destroy behavior. |
1444 | 25.0k | if (const auto *RT = dyn_cast<RecordType>(type)) |
1445 | 0 | if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl())) |
1446 | 0 | if (RD->hasMutableFields() || !RD->isTrivial()) |
1447 | 0 | return false; |
1448 | | |
1449 | 25.0k | return true; |
1450 | 25.0k | } |
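 | |
 | | // Illustration (hypothetical, not part of CGExpr.cpp): how the predicate
 | | // above classifies a few object types.
 | | const int ConstScalar = 1;         // emittable: const, non-volatile
 | | const volatile int ConstVol = 2;   // not emittable: volatile
 | | struct HasMutable { mutable int X; };
 | | const HasMutable ConstMutable{};   // not emittable: mutable subobject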
1451 | | |
1452 | | /// Can we constant-emit a load of a reference to a variable of the |
1453 | | /// given type? This is different from predicates like |
1454 | | /// Decl::mightBeUsableInConstantExpressions because we do want it to apply |
1455 | | /// in situations that don't necessarily satisfy the language's rules |
1456 | | /// for this (e.g. C++'s ODR-use rules). For example, we want to able |
1457 | | /// to do this with const float variables even if those variables |
1458 | | /// aren't marked 'constexpr'. |
1459 | | enum ConstantEmissionKind { |
1460 | | CEK_None, |
1461 | | CEK_AsReferenceOnly, |
1462 | | CEK_AsValueOrReference, |
1463 | | CEK_AsValueOnly |
1464 | | }; |
1465 | 423k | static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) { |
1466 | 423k | type = type.getCanonicalType(); |
1467 | 423k | if (const auto *ref = dyn_cast<ReferenceType>(type)) { |
1468 | 2.14k | if (isConstantEmittableObjectType(ref->getPointeeType())) |
1469 | 9 | return CEK_AsValueOrReference; |
1470 | 2.13k | return CEK_AsReferenceOnly; |
1471 | 2.13k | } |
1472 | 421k | if (isConstantEmittableObjectType(type)) |
1473 | 25.0k | return CEK_AsValueOnly; |
1474 | 396k | return CEK_None; |
1475 | 396k | } |
1476 | | |
1477 | | /// Try to emit a reference to the given value without producing it as |
1478 | | /// an l-value. This is just an optimization, but it avoids us needing |
1479 | | /// to emit global copies of variables if they're named without triggering |
1480 | | /// a formal use in a context where we can't emit a direct reference to them, |
1481 | | /// for instance if a block or lambda or a member of a local class uses a |
1482 | | /// const int variable or constexpr variable from an enclosing function. |
1483 | | CodeGenFunction::ConstantEmission |
1484 | 719k | CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) { |
1485 | 719k | ValueDecl *value = refExpr->getDecl(); |
1486 | | |
1487 | | // The value needs to be an enum constant or a constant variable. |
1488 | 719k | ConstantEmissionKind CEK; |
1489 | 719k | if (isa<ParmVarDecl>(value)) { |
1490 | 293k | CEK = CEK_None; |
1491 | 425k | } else if (auto *var = dyn_cast<VarDecl>(value)) { |
1492 | 423k | CEK = checkVarTypeForConstantEmission(var->getType()); |
1493 | 2.03k | } else if (isa<EnumConstantDecl>(value)) { |
1494 | 2.02k | CEK = CEK_AsValueOnly; |
1495 | 7 | } else { |
1496 | 7 | CEK = CEK_None; |
1497 | 7 | } |
1498 | 719k | if (CEK == CEK_None) return ConstantEmission()689k ; |
1499 | | |
1500 | 29.2k | Expr::EvalResult result; |
1501 | 29.2k | bool resultIsReference; |
1502 | 29.2k | QualType resultType; |
1503 | | |
1504 | | // It's best to evaluate all the way as an r-value if that's permitted. |
1505 | 29.2k | if (CEK != CEK_AsReferenceOnly && |
1506 | 27.0k | refExpr->EvaluateAsRValue(result, getContext())) { |
1507 | 7.74k | resultIsReference = false; |
1508 | 7.74k | resultType = refExpr->getType(); |
1509 | | |
1510 | | // Otherwise, try to evaluate as an l-value. |
1511 | 21.4k | } else if (CEK != CEK_AsValueOnly && |
1512 | 2.13k | refExpr->EvaluateAsLValue(result, getContext())) { |
1513 | 61 | resultIsReference = true; |
1514 | 61 | resultType = value->getType(); |
1515 | | |
1516 | | // Failure. |
1517 | 21.3k | } else { |
1518 | 21.3k | return ConstantEmission(); |
1519 | 21.3k | } |
1520 | | |
1521 | | // In any case, if the initializer has side-effects, abandon ship. |
1522 | 7.80k | if (result.HasSideEffects) |
1523 | 0 | return ConstantEmission(); |
1524 | | |
1525 | | // In CUDA/HIP device compilation, a lambda may capture a reference variable |
1526 | | // referencing a global host variable by copy. In this case the lambda should |
1527 | | // make a copy of the value of the global host variable. The DRE of the |
1528 | | // captured reference variable cannot be emitted as a load from the host
1529 | | // global variable as a compile-time constant, since the host variable is not
1530 | | // accessible on device. The DRE of the captured reference variable has to be |
1531 | | // loaded from captures. |
1532 | 7.80k | if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue()7 && |
1533 | 5 | refExpr->refersToEnclosingVariableOrCapture()) { |
1534 | 3 | auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl); |
1535 | 3 | if (MD && MD->getParent()->isLambda() && |
1536 | 3 | MD->getOverloadedOperator() == OO_Call) { |
1537 | 3 | const APValue::LValueBase &base = result.Val.getLValueBase(); |
1538 | 3 | if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) { |
1539 | 3 | if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) { |
1540 | 3 | if (!VD->hasAttr<CUDADeviceAttr>()) { |
1541 | 1 | return ConstantEmission(); |
1542 | 1 | } |
1543 | 7.80k | } |
1544 | 3 | } |
1545 | 3 | } |
1546 | 3 | } |
1547 | | |
1548 | | // Emit as a constant. |
1549 | 7.80k | auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(), |
1550 | 7.80k | result.Val, resultType); |
1551 | | |
1552 | | // Make sure we emit a debug reference to the global variable. |
1553 | | // This should probably fire even for |
1554 | 7.80k | if (isa<VarDecl>(value)) { |
1555 | 5.77k | if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value))) |
1556 | 5.67k | EmitDeclRefExprDbgValue(refExpr, result.Val); |
1557 | 2.02k | } else { |
1558 | 2.02k | assert(isa<EnumConstantDecl>(value)); |
1559 | 2.02k | EmitDeclRefExprDbgValue(refExpr, result.Val); |
1560 | 2.02k | } |
1561 | | |
1562 | | // If we emitted a reference constant, we need to dereference that. |
1563 | 7.80k | if (resultIsReference) |
1564 | 60 | return ConstantEmission::forReference(C); |
1565 | | |
1566 | 7.74k | return ConstantEmission::forValue(C); |
1567 | 7.74k | } |
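 | |
 | | // Illustration (hypothetical, not part of CGExpr.cpp): a use this fast path
 | | // folds. N is named inside a capture-less lambda without being odr-used, so
 | | // its DeclRefExpr can be emitted as the constant 42 rather than forcing a
 | | // global copy of N.
 | | int useConstFromLambdaDemo() {
 | |   const int N = 42;
 | |   auto L = [] { return N; }; // no capture: N is a constant expression here
 | |   return L();
 | | }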
1568 | | |
1569 | | static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF, |
1570 | 202k | const MemberExpr *ME) { |
1571 | 202k | if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) { |
1572 | | // Try to emit static variable member expressions as DREs. |
1573 | 51 | return DeclRefExpr::Create( |
1574 | 51 | CGF.getContext(), NestedNameSpecifierLoc(), SourceLocation(), VD, |
1575 | 51 | /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(), |
1576 | 51 | ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse()); |
1577 | 51 | } |
1578 | 202k | return nullptr; |
1579 | 202k | } |
1580 | | |
1581 | | CodeGenFunction::ConstantEmission |
1582 | 73.0k | CodeGenFunction::tryEmitAsConstant(const MemberExpr *ME) { |
1583 | 73.0k | if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, ME)) |
1584 | 23 | return tryEmitAsConstant(DRE); |
1585 | 73.0k | return ConstantEmission(); |
1586 | 73.0k | } |
1587 | | |
1588 | | llvm::Value *CodeGenFunction::emitScalarConstant( |
1589 | 7.79k | const CodeGenFunction::ConstantEmission &Constant, Expr *E) { |
1590 | 7.79k | assert(Constant && "not a constant"); |
1591 | 7.79k | if (Constant.isReference()) |
1592 | 57 | return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E), |
1593 | 57 | E->getExprLoc()) |
1594 | 57 | .getScalarVal(); |
1595 | 7.73k | return Constant.getValue(); |
1596 | 7.73k | } |
1597 | | |
1598 | | llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue, |
1599 | 935k | SourceLocation Loc) { |
1600 | 935k | return EmitLoadOfScalar(lvalue.getAddress(*this), lvalue.isVolatile(), |
1601 | 935k | lvalue.getType(), Loc, lvalue.getBaseInfo(), |
1602 | 935k | lvalue.getTBAAInfo(), lvalue.isNontemporal()); |
1603 | 935k | } |
1604 | | |
1605 | 1.91M | static bool hasBooleanRepresentation(QualType Ty) { |
1606 | 1.91M | if (Ty->isBooleanType()) |
1607 | 10.6k | return true; |
1608 | | |
1609 | 1.90M | if (const EnumType *ET = Ty->getAs<EnumType>()) |
1610 | 2.05k | return ET->getDecl()->getIntegerType()->isBooleanType(); |
1611 | | |
1612 | 1.89M | if (const AtomicType *AT = Ty->getAs<AtomicType>()) |
1613 | 105 | return hasBooleanRepresentation(AT->getValueType()); |
1614 | | |
1615 | 1.89M | return false; |
1616 | 1.89M | } |
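 | |
 | | // Illustration (hypothetical, not part of CGExpr.cpp): types the predicate
 | | // above accepts -- stored widened in memory but only ever holding 0 or 1.
 | | bool PlainBoolDemo;                      // isBooleanType()
 | | enum class OnOffDemo : bool { Off, On }; // enum whose underlying type is bool
 | | OnOffDemo ToggleDemo;                    // boolean-represented via EnumType
 | | // A C11 _Atomic(bool) takes the same path through the AtomicType case.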
1617 | | |
1618 | | static bool getRangeForType(CodeGenFunction &CGF, QualType Ty, |
1619 | | llvm::APInt &Min, llvm::APInt &End, |
1620 | 81.5k | bool StrictEnums, bool IsBool) { |
1621 | 81.5k | const EnumType *ET = Ty->getAs<EnumType>(); |
1622 | 81.5k | bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums5.23k && |
1623 | 50 | ET && !ET->getDecl()->isFixed()23 ; |
1624 | 81.5k | if (!IsBool && !IsRegularCPlusPlusEnum81.2k ) |
1625 | 81.2k | return false; |
1626 | | |
1627 | 277 | if (IsBool) { |
1628 | 256 | Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0); |
1629 | 256 | End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2); |
1630 | 21 | } else { |
1631 | 21 | const EnumDecl *ED = ET->getDecl(); |
1632 | 21 | llvm::Type *LTy = CGF.ConvertTypeForMem(ED->getIntegerType()); |
1633 | 21 | unsigned Bitwidth = LTy->getScalarSizeInBits(); |
1634 | 21 | unsigned NumNegativeBits = ED->getNumNegativeBits(); |
1635 | 21 | unsigned NumPositiveBits = ED->getNumPositiveBits(); |
1636 | | |
1637 | 21 | if (NumNegativeBits) { |
1638 | 9 | unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1); |
1639 | 9 | assert(NumBits <= Bitwidth); |
1640 | 9 | End = llvm::APInt(Bitwidth, 1) << (NumBits - 1); |
1641 | 9 | Min = -End; |
1642 | 12 | } else { |
1643 | 12 | assert(NumPositiveBits <= Bitwidth); |
1644 | 12 | End = llvm::APInt(Bitwidth, 1) << NumPositiveBits; |
1645 | 12 | Min = llvm::APInt(Bitwidth, 0); |
1646 | 12 | } |
1647 | 21 | } |
1648 | 277 | return true; |
1649 | 277 | } |
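 | |
 | | // Worked example (hypothetical, not part of CGExpr.cpp): for
 | | //   enum E { A = -4, B = 3 };
 | | // NumNegativeBits = 3 (to hold -4) and NumPositiveBits = 2 (to hold 3), so
 | | // NumBits = max(3, 2 + 1) = 3, End = 1 << (3 - 1) = 4, Min = -End = -4.
 | | // In C++ under -fstrict-enums this yields the load range [-4, 4).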
1650 | | |
1651 | 81.5k | llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) { |
1652 | 81.5k | llvm::APInt Min, End; |
1653 | 81.5k | if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums, |
1654 | 81.5k | hasBooleanRepresentation(Ty))) |
1655 | 81.2k | return nullptr; |
1656 | | |
1657 | 254 | llvm::MDBuilder MDHelper(getLLVMContext()); |
1658 | 254 | return MDHelper.createRange(Min, End); |
1659 | 254 | } |
1660 | | |
1661 | | bool CodeGenFunction::EmitScalarRangeCheck(llvm::Value *Value, QualType Ty, |
1662 | 941k | SourceLocation Loc) { |
1663 | 941k | bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool); |
1664 | 941k | bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum); |
1665 | 941k | if (!HasBoolCheck && !HasEnumCheck941k ) |
1666 | 941k | return false; |
1667 | | |
1668 | 230 | bool IsBool = hasBooleanRepresentation(Ty) || |
1669 | 221 | NSAPI(CGM.getContext()).isObjCBOOLType(Ty); |
1670 | 230 | bool NeedsBoolCheck = HasBoolCheck && IsBool184 ; |
1671 | 230 | bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>()125 ; |
1672 | 230 | if (!NeedsBoolCheck && !NeedsEnumCheck211 ) |
1673 | 204 | return false; |
1674 | | |
1675 | | // Single-bit booleans don't need to be checked. Special-case this to avoid |
1676 | | // a bit width mismatch when handling bitfield values. This is handled by |
1677 | | // EmitFromMemory for the non-bitfield case. |
1678 | 26 | if (IsBool && |
1679 | 19 | cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1) |
1680 | 3 | return false; |
1681 | | |
1682 | 23 | llvm::APInt Min, End; |
1683 | 23 | if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool)) |
1684 | 0 | return true; |
1685 | | |
1686 | 23 | auto &Ctx = getLLVMContext(); |
1687 | 23 | SanitizerScope SanScope(this); |
1688 | 23 | llvm::Value *Check; |
1689 | 23 | --End; |
1690 | 23 | if (!Min) { |
1691 | 21 | Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End)); |
1692 | 2 | } else { |
1693 | 2 | llvm::Value *Upper = |
1694 | 2 | Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End)); |
1695 | 2 | llvm::Value *Lower = |
1696 | 2 | Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min)); |
1697 | 2 | Check = Builder.CreateAnd(Upper, Lower); |
1698 | 2 | } |
1699 | 23 | llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc), |
1700 | 23 | EmitCheckTypeDescriptor(Ty)}; |
1701 | 23 | SanitizerMask Kind = |
1702 | 16 | NeedsEnumCheck ? SanitizerKind::Enum7 : SanitizerKind::Bool; |
1703 | 23 | EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue, |
1704 | 23 | StaticArgs, EmitCheckValue(Value)); |
1705 | 23 | return true; |
1706 | 23 | } |
1707 | | |
1708 | | llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile, |
1709 | | QualType Ty, |
1710 | | SourceLocation Loc, |
1711 | | LValueBaseInfo BaseInfo, |
1712 | | TBAAAccessInfo TBAAInfo, |
1713 | 940k | bool isNontemporal) { |
1714 | 940k | if (!CGM.getCodeGenOpts().PreserveVec3Type) { |
1715 | | // For better performance, handle vector loads differently. |
1716 | 940k | if (Ty->isVectorType()) { |
1717 | 136k | const llvm::Type *EltTy = Addr.getElementType(); |
1718 | | |
1719 | 136k | const auto *VTy = cast<llvm::FixedVectorType>(EltTy); |
1720 | | |
1721 | | // Handle vectors of size 3 like size 4 for better performance. |
1722 | 136k | if (VTy->getNumElements() == 3) { |
1723 | | |
1724 | | // Bitcast to vec4 type. |
1725 | 51 | auto *vec4Ty = llvm::FixedVectorType::get(VTy->getElementType(), 4); |
1726 | 51 | Address Cast = Builder.CreateElementBitCast(Addr, vec4Ty, "castToVec4"); |
1727 | | // Now load value. |
1728 | 51 | llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4"); |
1729 | | |
1730 | | // Shuffle vector to get vec3. |
1731 | 51 | V = Builder.CreateShuffleVector(V, ArrayRef<int>{0, 1, 2}, |
1732 | 51 | "extractVec"); |
1733 | 51 | return EmitFromMemory(V, Ty); |
1734 | 51 | } |
1735 | 940k | } |
1736 | 940k | } |
1737 | | |
1738 | | // Atomic operations have to be done on integral types. |
1739 | 940k | LValue AtomicLValue = |
1740 | 940k | LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo); |
1741 | 940k | if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)940k ) { |
1742 | 54 | return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal(); |
1743 | 54 | } |
1744 | | |
1745 | 940k | llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile); |
1746 | 940k | if (isNontemporal) { |
1747 | 169 | llvm::MDNode *Node = llvm::MDNode::get( |
1748 | 169 | Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1))); |
1749 | 169 | Load->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node); |
1750 | 169 | } |
1751 | | |
1752 | 940k | CGM.DecorateInstructionWithTBAA(Load, TBAAInfo); |
1753 | | |
1754 | 940k | if (EmitScalarRangeCheck(Load, Ty, Loc)) { |
1755 | | // In order to prevent the optimizer from throwing away the check, don't |
1756 | | // attach range metadata to the load. |
1757 | 940k | } else if (CGM.getCodeGenOpts().OptimizationLevel > 0) |
1758 | 81.5k | if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) |
1759 | 251 | Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo); |
1760 | | |
1761 | 940k | return EmitFromMemory(Load, Ty); |
1762 | 940k | } |
1763 | | |
1764 | 875k | llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) { |
1765 | | // Bool has a different representation in memory than in registers. |
1766 | 875k | if (hasBooleanRepresentation(Ty)) { |
1767 | | // This should really always be an i1, but sometimes it's already |
1768 | | // an i8, and it's awkward to track those cases down. |
1769 | 5.04k | if (Value->getType()->isIntegerTy(1)) |
1770 | 5.00k | return Builder.CreateZExt(Value, ConvertTypeForMem(Ty), "frombool"); |
1771 | 40 | assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) && |
1772 | 40 | "wrong value rep of bool"); |
1773 | 40 | } |
1774 | | |
1775 | 870k | return Value; |
1776 | 875k | } |
1777 | | |
1778 | 952k | llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) { |
1779 | | // Bool has a different representation in memory than in registers. |
1780 | 952k | if (hasBooleanRepresentation(Ty)) { |
1781 | 5.32k | assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) && |
1782 | 5.32k | "wrong value rep of bool"); |
1783 | 5.32k | return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool"); |
1784 | 5.32k | } |
1785 | | |
1786 | 947k | return Value; |
1787 | 947k | } |
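 | |
 | | // Illustration (hypothetical, not part of CGExpr.cpp): for 'bool' on typical
 | | // targets the memory type is i8 while the register type is i1, so the pair
 | | // above emits
 | | //   %frombool = zext i1 %v to i8    ; EmitToMemory, before the store
 | | //   %tobool   = trunc i8 %ld to i1  ; EmitFromMemory, after the load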
1788 | | |
1789 | | // Convert the pointer of \p Addr to a pointer to a vector (the value type of |
1790 | | // MatrixType), if it points to an array (the memory type of MatrixType).
1791 | | static Address MaybeConvertMatrixAddress(Address Addr, CodeGenFunction &CGF, |
1792 | 368 | bool IsVector = true) { |
1793 | 368 | auto *ArrayTy = dyn_cast<llvm::ArrayType>( |
1794 | 368 | cast<llvm::PointerType>(Addr.getPointer()->getType())->getElementType()); |
1795 | 368 | if (ArrayTy && IsVector156 ) { |
1796 | 156 | auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(), |
1797 | 156 | ArrayTy->getNumElements()); |
1798 | | |
1799 | 156 | return Address(CGF.Builder.CreateElementBitCast(Addr, VectorTy)); |
1800 | 156 | } |
1801 | 212 | auto *VectorTy = dyn_cast<llvm::VectorType>( |
1802 | 212 | cast<llvm::PointerType>(Addr.getPointer()->getType())->getElementType()); |
1803 | 212 | if (VectorTy && !IsVector) { |
1804 | 0 | auto *ArrayTy = llvm::ArrayType::get( |
1805 | 0 | VectorTy->getElementType(), |
1806 | 0 | cast<llvm::FixedVectorType>(VectorTy)->getNumElements()); |
1807 | |
1808 | 0 | return Address(CGF.Builder.CreateElementBitCast(Addr, ArrayTy)); |
1809 | 0 | } |
1810 | | |
1811 | 212 | return Addr; |
1812 | 212 | } |
1813 | | |
1814 | | // Emit a store of a matrix LValue. This may require casting the original |
1815 | | // pointer to memory address (ArrayType) to a pointer to the value type |
1816 | | // (VectorType). |
1817 | | static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue, |
1818 | 197 | bool isInit, CodeGenFunction &CGF) { |
1819 | 197 | Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(CGF), CGF, |
1820 | 197 | value->getType()->isVectorTy()); |
1821 | 197 | CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(), |
1822 | 197 | lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit, |
1823 | 197 | lvalue.isNontemporal()); |
1824 | 197 | } |
1825 | | |
1826 | | void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr, |
1827 | | bool Volatile, QualType Ty, |
1828 | | LValueBaseInfo BaseInfo, |
1829 | | TBAAAccessInfo TBAAInfo, |
1830 | 874k | bool isInit, bool isNontemporal) { |
1831 | 874k | if (!CGM.getCodeGenOpts().PreserveVec3Type) { |
1832 | | // Handle vectors differently to get better performance. |
1833 | 874k | if (Ty->isVectorType()) { |
1834 | 113k | llvm::Type *SrcTy = Value->getType(); |
1835 | 113k | auto *VecTy = dyn_cast<llvm::VectorType>(SrcTy); |
1836 | | // Handle vec3 special. |
1837 | 113k | if (VecTy && cast<llvm::FixedVectorType>(VecTy)->getNumElements() == 3113k ) { |
1838 | | // Our source is a vec3, do a shuffle vector to make it a vec4. |
1839 | 166 | Value = Builder.CreateShuffleVector(Value, ArrayRef<int>{0, 1, 2, -1}, |
1840 | 166 | "extractVec"); |
1841 | 166 | SrcTy = llvm::FixedVectorType::get(VecTy->getElementType(), 4); |
1842 | 166 | } |
1843 | 113k | if (Addr.getElementType() != SrcTy) { |
1844 | 168 | Addr = Builder.CreateElementBitCast(Addr, SrcTy, "storetmp"); |
1845 | 168 | } |
1846 | 113k | } |
1847 | 874k | } |
1848 | | |
1849 | 874k | Value = EmitToMemory(Value, Ty); |
1850 | | |
1851 | 874k | LValue AtomicLValue = |
1852 | 874k | LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo); |
1853 | 874k | if (Ty->isAtomicType() || |
1854 | 874k | (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue)235k )) { |
1855 | 65 | EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit); |
1856 | 65 | return; |
1857 | 65 | } |
1858 | | |
1859 | 874k | llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile); |
1860 | 874k | if (isNontemporal) { |
1861 | 231 | llvm::MDNode *Node = |
1862 | 231 | llvm::MDNode::get(Store->getContext(), |
1863 | 231 | llvm::ConstantAsMetadata::get(Builder.getInt32(1))); |
1864 | 231 | Store->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node); |
1865 | 231 | } |
1866 | | |
1867 | 874k | CGM.DecorateInstructionWithTBAA(Store, TBAAInfo); |
1868 | 874k | } |
1869 | | |
1870 | | void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue, |
1871 | 873k | bool isInit) { |
1872 | 873k | if (lvalue.getType()->isConstantMatrixType()) { |
1873 | 197 | EmitStoreOfMatrixScalar(value, lvalue, isInit, *this); |
1874 | 197 | return; |
1875 | 197 | } |
1876 | | |
1877 | 873k | EmitStoreOfScalar(value, lvalue.getAddress(*this), lvalue.isVolatile(), |
1878 | 873k | lvalue.getType(), lvalue.getBaseInfo(), |
1879 | 873k | lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal()); |
1880 | 873k | } |
1881 | | |
1882 | | // Emit a load of a LValue of matrix type. This may require casting the pointer |
1883 | | // to memory address (ArrayType) to a pointer to the value type (VectorType). |
1884 | | static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc, |
1885 | 154 | CodeGenFunction &CGF) { |
1886 | 154 | assert(LV.getType()->isConstantMatrixType()); |
1887 | 154 | Address Addr = MaybeConvertMatrixAddress(LV.getAddress(CGF), CGF); |
1888 | 154 | LV.setAddress(Addr); |
1889 | 154 | return RValue::get(CGF.EmitLoadOfScalar(LV, Loc)); |
1890 | 154 | } |
1891 | | |
1892 | | /// EmitLoadOfLValue - Given an expression that represents a value lvalue, this |
1893 | | /// method emits the address of the lvalue, then loads the result as an rvalue, |
1894 | | /// returning the rvalue. |
1895 | 865k | RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) { |
1896 | 865k | if (LV.isObjCWeak()) { |
1897 | | // load of a __weak object. |
1898 | 39 | Address AddrWeakObj = LV.getAddress(*this); |
1899 | 39 | return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this, |
1900 | 39 | AddrWeakObj)); |
1901 | 39 | } |
1902 | 865k | if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) { |
1903 | | // In MRC mode, we do a load+autorelease. |
1904 | 150 | if (!getLangOpts().ObjCAutoRefCount) { |
1905 | 15 | return RValue::get(EmitARCLoadWeak(LV.getAddress(*this))); |
1906 | 15 | } |
1907 | | |
1908 | | // In ARC mode, we load retained and then consume the value. |
1909 | 135 | llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress(*this)); |
1910 | 135 | Object = EmitObjCConsumeObject(LV.getType(), Object); |
1911 | 135 | return RValue::get(Object); |
1912 | 135 | } |
1913 | | |
1914 | 865k | if (LV.isSimple()) { |
1915 | 864k | assert(!LV.getType()->isFunctionType()); |
1916 | | |
1917 | 864k | if (LV.getType()->isConstantMatrixType()) |
1918 | 154 | return EmitLoadOfMatrixLValue(LV, Loc, *this); |
1919 | | |
1920 | | // Everything needs a load. |
1921 | 864k | return RValue::get(EmitLoadOfScalar(LV, Loc)); |
1922 | 864k | } |
1923 | | |
1924 | 1.24k | if (LV.isVectorElt()) { |
1925 | 49 | llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(), |
1926 | 49 | LV.isVolatileQualified()); |
1927 | 49 | return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(), |
1928 | 49 | "vecext")); |
1929 | 49 | } |
1930 | | |
1931 | | // If this is a reference to a subset of the elements of a vector, either |
1932 | | // shuffle the input or extract/insert them as appropriate. |
1933 | 1.20k | if (LV.isExtVectorElt()) { |
1934 | 244 | return EmitLoadOfExtVectorElementLValue(LV); |
1935 | 244 | } |
1936 | | |
1937 | | // Global register variables always invoke intrinsics.
1938 | 956 | if (LV.isGlobalReg()) |
1939 | 24 | return EmitLoadOfGlobalRegLValue(LV); |
1940 | | |
1941 | 932 | if (LV.isMatrixElt()) { |
1942 | 2 | llvm::LoadInst *Load = |
1943 | 2 | Builder.CreateLoad(LV.getMatrixAddress(), LV.isVolatileQualified()); |
1944 | 2 | return RValue::get( |
1945 | 2 | Builder.CreateExtractElement(Load, LV.getMatrixIdx(), "matrixext")); |
1946 | 2 | } |
1947 | | |
1948 | 930 | assert(LV.isBitField() && "Unknown LValue type!"); |
1949 | 930 | return EmitLoadOfBitfieldLValue(LV, Loc); |
1950 | 930 | } |
1951 | | |
1952 | | RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV, |
1953 | 950 | SourceLocation Loc) { |
1954 | 950 | const CGBitFieldInfo &Info = LV.getBitFieldInfo(); |
1955 | | |
1956 | | // Get the output type. |
1957 | 950 | llvm::Type *ResLTy = ConvertType(LV.getType()); |
1958 | | |
1959 | 950 | Address Ptr = LV.getBitFieldAddress(); |
1960 | 950 | llvm::Value *Val = |
1961 | 950 | Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load"); |
1962 | | |
1963 | 950 | bool UseVolatile = LV.isVolatileQualified() && |
1964 | 237 | Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget())57 ; |
1965 | 893 | const unsigned Offset = UseVolatile ? Info.VolatileOffset57 : Info.Offset; |
1966 | 950 | const unsigned StorageSize = |
1967 | 893 | UseVolatile ? Info.VolatileStorageSize57 : Info.StorageSize; |
1968 | 950 | if (Info.IsSigned) { |
1969 | 626 | assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize); |
1970 | 626 | unsigned HighBits = StorageSize - Offset - Info.Size; |
1971 | 626 | if (HighBits) |
1972 | 397 | Val = Builder.CreateShl(Val, HighBits, "bf.shl"); |
1973 | 626 | if (Offset + HighBits) |
1974 | 520 | Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr"); |
1975 | 324 | } else { |
1976 | 324 | if (Offset) |
1977 | 223 | Val = Builder.CreateLShr(Val, Offset, "bf.lshr"); |
1978 | 324 | if (static_cast<unsigned>(Offset) + Info.Size < StorageSize) |
1979 | 286 | Val = Builder.CreateAnd( |
1980 | 286 | Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear"); |
1981 | 324 | } |
1982 | 950 | Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast"); |
1983 | 950 | EmitScalarRangeCheck(Val, LV.getType(), Loc); |
1984 | 950 | return RValue::get(Val); |
1985 | 950 | } |
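 | |
 | | // Sketch (hypothetical, not part of CGExpr.cpp): the bf.shl/bf.ashr pair the
 | | // signed path above emits, as plain C++ for a 32-bit storage unit with
 | | // 1 <= Size and Offset + Size <= 32. Assumes arithmetic right shift on
 | | // signed int, which holds on clang's targets.
 | | static int loadSignedBitfieldDemo(unsigned Storage, unsigned Offset,
 | |                                   unsigned Size) {
 | |   unsigned HighBits = 32 - Offset - Size;  // bits above the field
 | |   int Val = (int)(Storage << HighBits);    // bf.shl: discard higher bits
 | |   return Val >> (Offset + HighBits);       // bf.ashr: sign-extend into place
 | | }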
1986 | | |
1987 | | // If this is a reference to a subset of the elements of a vector, create an |
1988 | | // appropriate shufflevector. |
1989 | 246 | RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) { |
1990 | 246 | llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(), |
1991 | 246 | LV.isVolatileQualified()); |
1992 | | |
1993 | 246 | const llvm::Constant *Elts = LV.getExtVectorElts(); |
1994 | | |
1995 | | // If the result of the expression is a non-vector type, we must be extracting |
1996 | | // a single element. Just codegen as an extractelement. |
1997 | 246 | const VectorType *ExprVT = LV.getType()->getAs<VectorType>(); |
1998 | 246 | if (!ExprVT) { |
1999 | 203 | unsigned InIdx = getAccessedFieldNo(0, Elts); |
2000 | 203 | llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx); |
2001 | 203 | return RValue::get(Builder.CreateExtractElement(Vec, Elt)); |
2002 | 203 | } |
2003 | | |
2004 | | // Always use a shufflevector to try to retain the original program structure.
2005 | 43 | unsigned NumResultElts = ExprVT->getNumElements(); |
2006 | | |
2007 | 43 | SmallVector<int, 4> Mask; |
2008 | 169 | for (unsigned i = 0; i != NumResultElts; ++i126 ) |
2009 | 126 | Mask.push_back(getAccessedFieldNo(i, Elts)); |
2010 | | |
2011 | 43 | Vec = Builder.CreateShuffleVector(Vec, Mask); |
2012 | 43 | return RValue::get(Vec); |
2013 | 43 | } |
2014 | | |
2015 | | /// Generates lvalue for partial ext_vector access. |
2016 | 1 | Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) { |
2017 | 1 | Address VectorAddress = LV.getExtVectorAddress(); |
2018 | 1 | QualType EQT = LV.getType()->castAs<VectorType>()->getElementType(); |
2019 | 1 | llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT); |
2020 | | |
2021 | 1 | Address CastToPointerElement = |
2022 | 1 | Builder.CreateElementBitCast(VectorAddress, VectorElementTy, |
2023 | 1 | "conv.ptr.element"); |
2024 | | |
2025 | 1 | const llvm::Constant *Elts = LV.getExtVectorElts(); |
2026 | 1 | unsigned ix = getAccessedFieldNo(0, Elts); |
2027 | | |
2028 | 1 | Address VectorBasePtrPlusIx = |
2029 | 1 | Builder.CreateConstInBoundsGEP(CastToPointerElement, ix, |
2030 | 1 | "vector.elt"); |
2031 | | |
2032 | 1 | return VectorBasePtrPlusIx; |
2033 | 1 | } |
2034 | | |
2035 | | /// Loads of global named registers are always calls to intrinsics.
2036 | 24 | RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) { |
2037 | 24 | assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) && |
2038 | 24 | "Bad type for register variable"); |
2039 | 24 | llvm::MDNode *RegName = cast<llvm::MDNode>( |
2040 | 24 | cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata()); |
2041 | | |
2042 | | // We accept integer and pointer types only |
2043 | 24 | llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType()); |
2044 | 24 | llvm::Type *Ty = OrigTy; |
2045 | 24 | if (OrigTy->isPointerTy()) |
2046 | 3 | Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy); |
2047 | 24 | llvm::Type *Types[] = { Ty }; |
2048 | | |
2049 | 24 | llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types); |
2050 | 24 | llvm::Value *Call = Builder.CreateCall( |
2051 | 24 | F, llvm::MetadataAsValue::get(Ty->getContext(), RegName)); |
2052 | 24 | if (OrigTy->isPointerTy()) |
2053 | 3 | Call = Builder.CreateIntToPtr(Call, OrigTy); |
2054 | 24 | return RValue::get(Call); |
2055 | 24 | } |
2056 | | |
2057 | | /// EmitStoreThroughLValue - Store the specified rvalue into the specified |
2058 | | /// lvalue, where both are guaranteed to the have the same type, and that type |
2059 | | /// is 'Ty'. |
2060 | | void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, |
2061 | 386k | bool isInit) { |
2062 | 386k | if (!Dst.isSimple()) { |
2063 | 818 | if (Dst.isVectorElt()) { |
2064 | | // Read/modify/write the vector, inserting the new element. |
2065 | 439 | llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(), |
2066 | 439 | Dst.isVolatileQualified()); |
2067 | 439 | Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(), |
2068 | 439 | Dst.getVectorIdx(), "vecins"); |
2069 | 439 | Builder.CreateStore(Vec, Dst.getVectorAddress(), |
2070 | 439 | Dst.isVolatileQualified()); |
2071 | 439 | return; |
2072 | 439 | } |
2073 | | |
2074 | | // If this is an update of extended vector elements, insert them as |
2075 | | // appropriate. |
2076 | 379 | if (Dst.isExtVectorElt()) |
2077 | 29 | return EmitStoreThroughExtVectorComponentLValue(Src, Dst); |
2078 | | |
2079 | 350 | if (Dst.isGlobalReg()) |
2080 | 18 | return EmitStoreThroughGlobalRegLValue(Src, Dst); |
2081 | | |
2082 | 332 | if (Dst.isMatrixElt()) { |
2083 | 17 | llvm::Value *Vec = Builder.CreateLoad(Dst.getMatrixAddress()); |
2084 | 17 | Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(), |
2085 | 17 | Dst.getMatrixIdx(), "matins"); |
2086 | 17 | Builder.CreateStore(Vec, Dst.getMatrixAddress(), |
2087 | 17 | Dst.isVolatileQualified()); |
2088 | 17 | return; |
2089 | 17 | } |
2090 | | |
2091 | 315 | assert(Dst.isBitField() && "Unknown LValue type"); |
2092 | 315 | return EmitStoreThroughBitfieldLValue(Src, Dst); |
2093 | 315 | } |
2094 | | |
2095 | | // There's special magic for assigning into an ARC-qualified l-value. |
2096 | 385k | if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) { |
2097 | 71 | switch (Lifetime) { |
2098 | 0 | case Qualifiers::OCL_None: |
2099 | 0 | llvm_unreachable("present but none"); |
2100 | | |
2101 | 41 | case Qualifiers::OCL_ExplicitNone: |
2102 | | // nothing special |
2103 | 41 | break; |
2104 | | |
2105 | 19 | case Qualifiers::OCL_Strong: |
2106 | 19 | if (isInit) { |
2107 | 3 | Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal())); |
2108 | 3 | break; |
2109 | 3 | } |
2110 | 16 | EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true); |
2111 | 16 | return; |
2112 | | |
2113 | 11 | case Qualifiers::OCL_Weak: |
2114 | 11 | if (isInit) |
2115 | | // Initialize and then skip the primitive store. |
2116 | 3 | EmitARCInitWeak(Dst.getAddress(*this), Src.getScalarVal()); |
2117 | 8 | else |
2118 | 8 | EmitARCStoreWeak(Dst.getAddress(*this), Src.getScalarVal(), |
2119 | 8 | /*ignore*/ true); |
2120 | 11 | return; |
2121 | | |
2122 | 0 | case Qualifiers::OCL_Autoreleasing: |
2123 | 0 | Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(), |
2124 | 0 | Src.getScalarVal())); |
2125 | | // fall into the normal path |
2126 | 0 | break; |
2127 | 385k | } |
2128 | 385k | } |
2129 | | |
2130 | 385k | if (Dst.isObjCWeak() && !Dst.isNonGC()32 ) { |
2131 | | // Store into a __weak object.
2132 | 26 | Address LvalueDst = Dst.getAddress(*this); |
2133 | 26 | llvm::Value *src = Src.getScalarVal(); |
2134 | 26 | CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst); |
2135 | 26 | return; |
2136 | 26 | } |
2137 | | |
2138 | 385k | if (Dst.isObjCStrong() && !Dst.isNonGC()248 ) { |
2139 | | // Store into a __strong object.
2140 | 216 | Address LvalueDst = Dst.getAddress(*this); |
2141 | 216 | llvm::Value *src = Src.getScalarVal(); |
2142 | 216 | if (Dst.isObjCIvar()) { |
2143 | 54 | assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL"); |
2144 | 54 | llvm::Type *ResultType = IntPtrTy; |
2145 | 54 | Address dst = EmitPointerWithAlignment(Dst.getBaseIvarExp()); |
2146 | 54 | llvm::Value *RHS = dst.getPointer(); |
2147 | 54 | RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast"); |
2148 | 54 | llvm::Value *LHS = |
2149 | 54 | Builder.CreatePtrToInt(LvalueDst.getPointer(), ResultType, |
2150 | 54 | "sub.ptr.lhs.cast"); |
2151 | 54 | llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset"); |
2152 | 54 | CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, |
2153 | 54 | BytesBetween); |
2154 | 162 | } else if (Dst.isGlobalObjCRef()) { |
2155 | 81 | CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst, |
2156 | 81 | Dst.isThreadLocalRef()); |
2157 | 81 | } |
2158 | 81 | else |
2159 | 81 | CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst); |
2160 | 216 | return; |
2161 | 216 | } |
2162 | | |
2163 | 385k | assert(Src.isScalar() && "Can't emit an agg store with this method"); |
2164 | 385k | EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit); |
2165 | 385k | } |
2166 | | |
2167 | | void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, |
2168 | 1.01k | llvm::Value **Result) { |
2169 | 1.01k | const CGBitFieldInfo &Info = Dst.getBitFieldInfo(); |
2170 | 1.01k | llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType()); |
2171 | 1.01k | Address Ptr = Dst.getBitFieldAddress(); |
2172 | | |
2173 | | // Get the source value, truncated to the width of the bit-field. |
2174 | 1.01k | llvm::Value *SrcVal = Src.getScalarVal(); |
2175 | | |
2176 | | // Cast the source to the storage type and shift it into place. |
2177 | 1.01k | SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(), |
2178 | 1.01k | /*isSigned=*/false); |
2179 | 1.01k | llvm::Value *MaskedVal = SrcVal; |
2180 | | |
2181 | 1.01k | const bool UseVolatile = |
2182 | 1.01k | CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified()864 && |
2183 | 109 | Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget())57 ; |
2184 | 1.01k | const unsigned StorageSize = |
2185 | 955 | UseVolatile ? Info.VolatileStorageSize57 : Info.StorageSize; |
2186 | 955 | const unsigned Offset = UseVolatile ? Info.VolatileOffset57 : Info.Offset; |
2187 | | // See if there are other bits in the bitfield's storage we'll need to load |
2188 | | // and mask together with the source before storing.
2189 | 1.01k | if (StorageSize != Info.Size) { |
2190 | 886 | assert(StorageSize > Info.Size && "Invalid bitfield size."); |
2191 | 886 | llvm::Value *Val = |
2192 | 886 | Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load"); |
2193 | | |
2194 | | // Mask the source value as needed. |
2195 | 886 | if (!hasBooleanRepresentation(Dst.getType())) |
2196 | 872 | SrcVal = Builder.CreateAnd( |
2197 | 872 | SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), |
2198 | 872 | "bf.value"); |
2199 | 886 | MaskedVal = SrcVal; |
2200 | 886 | if (Offset) |
2201 | 508 | SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl"); |
2202 | | |
2203 | | // Mask out the original value. |
2204 | 886 | Val = Builder.CreateAnd( |
2205 | 886 | Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size), |
2206 | 886 | "bf.clear"); |
2207 | | |
2208 | | // Or together the unchanged values and the source value. |
2209 | 886 | SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set"); |
2210 | 126 | } else { |
2211 | 126 | assert(Offset == 0); |
2212 | | // According to the AAPCS:
2213 | | // When a volatile bit-field is written, and its container does not overlap |
2214 | | // with any non-bit-field member, its container must be read exactly once |
2215 | | // and written exactly once using the access width appropriate to the type |
2216 | | // of the container. The two accesses are not atomic. |
2217 | 126 | if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget())72 && |
2218 | 72 | CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad) |
2219 | 36 | Builder.CreateLoad(Ptr, true, "bf.load"); |
2220 | 126 | } |
2221 | | |
2222 | | // Write the new value back out. |
2223 | 1.01k | Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified()); |
2224 | | |
2225 | | // Return the new value of the bit-field, if requested. |
2226 | 1.01k | if (Result) { |
2227 | 696 | llvm::Value *ResultVal = MaskedVal; |
2228 | | |
2229 | | // Sign extend the value if needed. |
2230 | 696 | if (Info.IsSigned) { |
2231 | 442 | assert(Info.Size <= StorageSize); |
2232 | 442 | unsigned HighBits = StorageSize - Info.Size; |
2233 | 442 | if (HighBits) { |
2234 | 356 | ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl"); |
2235 | 356 | ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr"); |
2236 | 356 | } |
2237 | 442 | } |
2238 | | |
2239 | 696 | ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned, |
2240 | 696 | "bf.result.cast"); |
2241 | 696 | *Result = EmitFromMemory(ResultVal, Dst.getType()); |
2242 | 696 | } |
2243 | 1.01k | } |
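 | |
 | | // Sketch (hypothetical, not part of CGExpr.cpp): the truncate/clear/merge
 | | // sequence above when the storage unit is wider than the field, as plain
 | | // C++ on a 32-bit unit; Storage and Src play the roles of Val and SrcVal.
 | | static unsigned storeBitfieldDemo(unsigned Storage, unsigned Src,
 | |                                   unsigned Offset, unsigned Size) {
 | |   unsigned LowMask = Size < 32 ? (1u << Size) - 1u : ~0u;
 | |   Src &= LowMask;                                     // bf.value
 | |   unsigned Cleared = Storage & ~(LowMask << Offset);  // bf.clear
 | |   return Cleared | (Src << Offset);                   // bf.shl + bf.set
 | | }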
2244 | | |
2245 | | void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src, |
2246 | 29 | LValue Dst) { |
2247 | | // This access turns into a read/modify/write of the vector. Load the input |
2248 | | // value now. |
2249 | 29 | llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddress(), |
2250 | 29 | Dst.isVolatileQualified()); |
2251 | 29 | const llvm::Constant *Elts = Dst.getExtVectorElts(); |
2252 | | |
2253 | 29 | llvm::Value *SrcVal = Src.getScalarVal(); |
2254 | | |
2255 | 29 | if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) { |
2256 | 8 | unsigned NumSrcElts = VTy->getNumElements(); |
2257 | 8 | unsigned NumDstElts = |
2258 | 8 | cast<llvm::FixedVectorType>(Vec->getType())->getNumElements(); |
2259 | 8 | if (NumDstElts == NumSrcElts) { |
2260 | | // Use a shuffle vector if the src and destination have the same number of
2261 | | // elements, and restore the vector mask since it is on the side it will be
2262 | | // stored.
2263 | 2 | SmallVector<int, 4> Mask(NumDstElts); |
2264 | 6 | for (unsigned i = 0; i != NumSrcElts; ++i4 ) |
2265 | 4 | Mask[getAccessedFieldNo(i, Elts)] = i; |
2266 | | |
2267 | 2 | Vec = Builder.CreateShuffleVector(SrcVal, Mask); |
2268 | 6 | } else if (NumDstElts > NumSrcElts) { |
2269 | | // Extended the source vector to the same length and then shuffle it |
2270 | | // into the destination. |
2271 | | // FIXME: since we're shuffling with undef, can we just use the indices |
2272 | | // into that? This could be simpler. |
2273 | 6 | SmallVector<int, 4> ExtMask; |
2274 | 32 | for (unsigned i = 0; i != NumSrcElts; ++i26 ) |
2275 | 26 | ExtMask.push_back(i); |
2276 | 6 | ExtMask.resize(NumDstElts, -1); |
2277 | 6 | llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(SrcVal, ExtMask); |
2278 | | // Build an identity mask for the destination.
2279 | 6 | SmallVector<int, 4> Mask; |
2280 | 52 | for (unsigned i = 0; i != NumDstElts; ++i46 ) |
2281 | 46 | Mask.push_back(i); |
2282 | | |
2283 | | // When the vector size is odd and .odd or .hi is used, the last element |
2284 | | // of the Elts constant array will be one past the size of the vector. |
2285 | | // Ignore the last element here, if it is greater than the mask size. |
2286 | 6 | if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size()) |
2287 | 2 | NumSrcElts--; |
2288 | | |
2289 | | // Overwrite the mask entries for the elements shuffled in from the source.
2290 | 30 | for (unsigned i = 0; i != NumSrcElts; ++i24 ) |
2291 | 24 | Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts; |
2292 | 6 | Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask); |
2293 | 0 | } else { |
2294 | | // We should never shorten the vector |
2295 | 0 | llvm_unreachable("unexpected shorten vector length"); |
2296 | 0 | } |
2297 | 21 | } else { |
2298 | | // If the Src is a scalar (not a vector), it must be updating one element.
2299 | 21 | unsigned InIdx = getAccessedFieldNo(0, Elts); |
2300 | 21 | llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx); |
2301 | 21 | Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt); |
2302 | 21 | } |
2303 | | |
2304 | 29 | Builder.CreateStore(Vec, Dst.getExtVectorAddress(), |
2305 | 29 | Dst.isVolatileQualified()); |
2306 | 29 | } |
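 | |
 | | // Illustration (hypothetical, not part of CGExpr.cpp): component stores that
 | | // reach the function above, written with clang's ext_vector_type extension.
 | | typedef float float2_demo __attribute__((ext_vector_type(2)));
 | | typedef float float4_demo __attribute__((ext_vector_type(4)));
 | | void swizzleStoreDemo(float4_demo &V, float2_demo S, float X) {
 | |   V.xy = S; // vector source: shufflevector merging old and new elements
 | |   V.w = X;  // scalar source: a single insertelement
 | | }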
2307 | | |
2308 | | /// Store of global named registers are always calls to intrinsics. |
2309 | 18 | void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) { |
2310 | 18 | assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) && |
2311 | 18 | "Bad type for register variable"); |
2312 | 18 | llvm::MDNode *RegName = cast<llvm::MDNode>( |
2313 | 18 | cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata()); |
2314 | 18 | assert(RegName && "Register LValue is not metadata"); |
2315 | | |
2316 | | // We accept integer and pointer types only |
2317 | 18 | llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType()); |
2318 | 18 | llvm::Type *Ty = OrigTy; |
2319 | 18 | if (OrigTy->isPointerTy()) |
2320 | 3 | Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy); |
2321 | 18 | llvm::Type *Types[] = { Ty }; |
2322 | | |
2323 | 18 | llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types); |
2324 | 18 | llvm::Value *Value = Src.getScalarVal(); |
2325 | 18 | if (OrigTy->isPointerTy()) |
2326 | 3 | Value = Builder.CreatePtrToInt(Value, Ty); |
2327 | 18 | Builder.CreateCall( |
2328 | 18 | F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value}); |
2329 | 18 | } |
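 | |
 | | // Illustration (hypothetical, not part of CGExpr.cpp): a global named
 | | // register variable. Stores become llvm.write_register and loads become
 | | // llvm.read_register calls; the register name is illustrative and is only
 | | // accepted on targets (e.g. AArch64) where 'sp' may be named this way.
 | | register unsigned long SPDemo asm("sp");
 | | void writeStackPointerDemo(unsigned long V) {
 | |   SPDemo = V; // @llvm.write_register.i64(!{!"sp"}, V)
 | | }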
2330 | | |
2331 | | // setObjCGCLValueClass - sets class of the lvalue for the purpose of |
2332 | | // generating the write-barrier API. It is currently a global, ivar,
2333 | | // or neither. |
2334 | | static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E, |
2335 | | LValue &LV, |
2336 | 1.18M | bool IsMemberAccess=false) { |
2337 | 1.18M | if (Ctx.getLangOpts().getGC() == LangOptions::NonGC) |
2338 | 1.18M | return; |
2339 | | |
2340 | 1.71k | if (isa<ObjCIvarRefExpr>(E)) { |
2341 | 252 | QualType ExpTy = E->getType(); |
2342 | 252 | if (IsMemberAccess && ExpTy->isPointerType()62 ) { |
2343 | | // If the ivar is a structure pointer, assigning to a field of
2344 | | // this struct follows gcc's behavior and conservatively makes it a
2345 | | // non-ivar write-barrier.
2346 | 14 | ExpTy = ExpTy->castAs<PointerType>()->getPointeeType(); |
2347 | 14 | if (ExpTy->isRecordType()) { |
2348 | 6 | LV.setObjCIvar(false); |
2349 | 6 | return; |
2350 | 6 | } |
2351 | 246 | } |
2352 | 246 | LV.setObjCIvar(true); |
2353 | 246 | auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E)); |
2354 | 246 | LV.setBaseIvarExp(Exp->getBase()); |
2355 | 246 | LV.setObjCArray(E->getType()->isArrayType()); |
2356 | 246 | return; |
2357 | 246 | } |
2358 | | |
2359 | 1.46k | if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) { |
2360 | 892 | if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) { |
2361 | 892 | if (VD->hasGlobalStorage()) { |
2362 | 284 | LV.setGlobalObjCRef(true); |
2363 | 284 | LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None); |
2364 | 284 | } |
2365 | 892 | } |
2366 | 892 | LV.setObjCArray(E->getType()->isArrayType()); |
2367 | 892 | return; |
2368 | 892 | } |
2369 | | |
2370 | 573 | if (const auto *Exp = dyn_cast<UnaryOperator>(E)) { |
2371 | 20 | setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess); |
2372 | 20 | return; |
2373 | 20 | } |
2374 | | |
2375 | 553 | if (const auto *Exp = dyn_cast<ParenExpr>(E)) { |
2376 | 26 | setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess); |
2377 | 26 | if (LV.isObjCIvar()) { |
2378 | | // If the cast is to a structure pointer, follow gcc's behavior and make
2379 | | // it a non-ivar write-barrier.
2380 | 12 | QualType ExpTy = E->getType(); |
2381 | 12 | if (ExpTy->isPointerType()) |
2382 | 10 | ExpTy = ExpTy->castAs<PointerType>()->getPointeeType(); |
2383 | 12 | if (ExpTy->isRecordType()) |
2384 | 8 | LV.setObjCIvar(false); |
2385 | 12 | } |
2386 | 26 | return; |
2387 | 26 | } |
2388 | | |
2389 | 527 | if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) { |
2390 | 0 | setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV); |
2391 | 0 | return; |
2392 | 0 | } |
2393 | | |
2394 | 527 | if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) { |
2395 | 173 | setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess); |
2396 | 173 | return; |
2397 | 173 | } |
2398 | | |
2399 | 354 | if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) { |
2400 | 14 | setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess); |
2401 | 14 | return; |
2402 | 14 | } |
2403 | | |
2404 | 340 | if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) { |
2405 | 0 | setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess); |
2406 | 0 | return; |
2407 | 0 | } |
2408 | | |
2409 | 340 | if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) { |
2410 | 139 | setObjCGCLValueClass(Ctx, Exp->getBase(), LV); |
2411 | 139 | if (LV.isObjCIvar() && !LV.isObjCArray()72 ) |
2412 | | // Using array syntax to assign to what an ivar points to is not the
2413 | | // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
2414 | 38 | LV.setObjCIvar(false); |
2415 | 101 | else if (LV.isGlobalObjCRef() && !LV.isObjCArray()50 ) |
2416 | | // Using array syntax to assign to what a global points to is not the
2417 | | // same as assigning to the global itself. {id *G;} G[i] = 0;
2418 | 4 | LV.setGlobalObjCRef(false); |
2419 | 139 | return; |
2420 | 139 | } |
2421 | | |
2422 | 201 | if (const auto *Exp = dyn_cast<MemberExpr>(E)) { |
2423 | 188 | setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true); |
2424 | | // We don't know if the member is an 'ivar', but this flag is looked at
2425 | | // only in the context of LV.isObjCIvar().
2426 | 188 | LV.setObjCArray(E->getType()->isArrayType()); |
2427 | 188 | return; |
2428 | 188 | } |
2429 | 201 | } |
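 | |
 | | // Illustrative sketch (assuming Objective-C GC mode, -fobjc-gc): the
 | | // classification computed above steers write-barrier selection when the
 | | // store is eventually emitted, e.g.
 | | //
 | | //   id gObj;                  // DeclRefExpr -> setGlobalObjCRef(true)
 | | //   self->ivar = x;           // ObjCIvarRefExpr -> setObjCIvar(true)
 | | //
 | | // roughly choosing between objc_assign_global and objc_assign_ivar.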
2430 | | |
2431 | | static llvm::Value * |
2432 | | EmitBitCastOfLValueToProperType(CodeGenFunction &CGF, |
2433 | | llvm::Value *V, llvm::Type *IRType, |
2434 | 98.8k | StringRef Name = StringRef()) { |
2435 | 98.8k | unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace(); |
2436 | 98.8k | return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name); |
2437 | 98.8k | } |
2438 | | |
2439 | | static LValue EmitThreadPrivateVarDeclLValue( |
2440 | | CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr, |
2441 | 254 | llvm::Type *RealVarTy, SourceLocation Loc) { |
2442 | 254 | if (CGF.CGM.getLangOpts().OpenMPIRBuilder) |
2443 | 0 | Addr = CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate( |
2444 | 0 | CGF, VD, Addr, Loc); |
2445 | 254 | else |
2446 | 254 | Addr = |
2447 | 254 | CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc); |
2448 | | |
2449 | 254 | Addr = CGF.Builder.CreateElementBitCast(Addr, RealVarTy); |
2450 | 254 | return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl); |
2451 | 254 | } |
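 | |
 | | // Illustrative example: a declaration such as
 | | //
 | | //   int counter;
 | | //   #pragma omp threadprivate(counter)
 | | //
 | | // is accessed through the OpenMP runtime (e.g. __kmpc_threadprivate_cached
 | | // with libomp), which returns the calling thread's private copy; that
 | | // address is what gets wrapped in the LValue above.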
2452 | | |
2453 | | static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF, |
2454 | 104 | const VarDecl *VD, QualType T) { |
2455 | 104 | llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res = |
2456 | 104 | OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD); |
2457 | | // Return an invalid address if the variable is MT_To and unified
2458 | | // memory is not enabled. For all other cases (MT_Link, and
2459 | | // MT_To with unified memory), return a valid address.
2460 | 104 | if (!Res || (85 *Res == OMPDeclareTargetDeclAttr::MT_To85 && |
2461 | 77 | !CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) |
2462 | 94 | return Address::invalid(); |
2463 | 10 | assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) || |
2464 | 10 | (*Res == OMPDeclareTargetDeclAttr::MT_To && |
2465 | 10 | CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) && |
2466 | 10 | "Expected link clause OR to clause with unified memory enabled."); |
2467 | 10 | QualType PtrTy = CGF.getContext().getPointerType(VD->getType()); |
2468 | 10 | Address Addr = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD); |
2469 | 10 | return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>()); |
2470 | 10 | } |
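 | |
 | | // Illustrative example: under
 | | //
 | | //   #pragma omp declare target link(gbl)
 | | //
 | | // device code does not reference 'gbl' directly; it loads the address
 | | // from the runtime-managed reference pointer returned by
 | | // getAddrOfDeclareTargetVar and dereferences that, hence the
 | | // EmitLoadOfPointer call above.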
2471 | | |
2472 | | Address |
2473 | | CodeGenFunction::EmitLoadOfReference(LValue RefLVal, |
2474 | | LValueBaseInfo *PointeeBaseInfo, |
2475 | 80.9k | TBAAAccessInfo *PointeeTBAAInfo) { |
2476 | 80.9k | llvm::LoadInst *Load = |
2477 | 80.9k | Builder.CreateLoad(RefLVal.getAddress(*this), RefLVal.isVolatile()); |
2478 | 80.9k | CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo()); |
2479 | | |
2480 | 80.9k | CharUnits Align = CGM.getNaturalTypeAlignment( |
2481 | 80.9k | RefLVal.getType()->getPointeeType(), PointeeBaseInfo, PointeeTBAAInfo, |
2482 | 80.9k | /* forPointeeType= */ true); |
2483 | 80.9k | return Address(Load, Align); |
2484 | 80.9k | } |
2485 | | |
2486 | 59.9k | LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) { |
2487 | 59.9k | LValueBaseInfo PointeeBaseInfo; |
2488 | 59.9k | TBAAAccessInfo PointeeTBAAInfo; |
2489 | 59.9k | Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo, |
2490 | 59.9k | &PointeeTBAAInfo); |
2491 | 59.9k | return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(), |
2492 | 59.9k | PointeeBaseInfo, PointeeTBAAInfo); |
2493 | 59.9k | } |
2494 | | |
2495 | | Address CodeGenFunction::EmitLoadOfPointer(Address Ptr, |
2496 | | const PointerType *PtrTy, |
2497 | | LValueBaseInfo *BaseInfo, |
2498 | 18.2k | TBAAAccessInfo *TBAAInfo) { |
2499 | 18.2k | llvm::Value *Addr = Builder.CreateLoad(Ptr); |
2500 | 18.2k | return Address(Addr, CGM.getNaturalTypeAlignment(PtrTy->getPointeeType(), |
2501 | 18.2k | BaseInfo, TBAAInfo, |
2502 | 18.2k | /*forPointeeType=*/true)); |
2503 | 18.2k | } |
2504 | | |
2505 | | LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr, |
2506 | 16.7k | const PointerType *PtrTy) { |
2507 | 16.7k | LValueBaseInfo BaseInfo; |
2508 | 16.7k | TBAAAccessInfo TBAAInfo; |
2509 | 16.7k | Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo); |
2510 | 16.7k | return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo); |
2511 | 16.7k | } |
2512 | | |
2513 | | static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF, |
2514 | 99.0k | const Expr *E, const VarDecl *VD) { |
2515 | 99.0k | QualType T = E->getType(); |
2516 | | |
2517 | | // If it's thread_local, emit a call to its wrapper function instead. |
2518 | 99.0k | if (VD->getTLSKind() == VarDecl::TLS_Dynamic && |
2519 | 226 | CGF.CGM.getCXXABI().usesThreadWrapperFunction(VD)) |
2520 | 151 | return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T); |
2521 | | // Check if the variable is marked as declare target with the link clause
2522 | | // in device codegen.
2523 | 98.8k | if (CGF.getLangOpts().OpenMPIsDevice) { |
2524 | 104 | Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T); |
2525 | 104 | if (Addr.isValid()) |
2526 | 10 | return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl); |
2527 | 98.8k | } |
2528 | | |
2529 | 98.8k | llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD); |
2530 | 98.8k | llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType()); |
2531 | 98.8k | V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy); |
2532 | 98.8k | CharUnits Alignment = CGF.getContext().getDeclAlign(VD); |
2533 | 98.8k | Address Addr(V, Alignment); |
2534 | | // Emit a reference to the private copy of the variable if it is an
2535 | | // OpenMP threadprivate variable.
2536 | 98.8k | if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd25.8k && |
2537 | 11.7k | VD->hasAttr<OMPThreadPrivateDeclAttr>()) { |
2538 | 144 | return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy, |
2539 | 144 | E->getExprLoc()); |
2540 | 144 | } |
2541 | 98.7k | LValue LV = VD->getType()->isReferenceType() ? |
2542 | 7.26k | CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(), |
2543 | 7.26k | AlignmentSource::Decl) : |
2544 | 91.4k | CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl); |
2545 | 98.7k | setObjCGCLValueClass(CGF.getContext(), E, LV); |
2546 | 98.7k | return LV; |
2547 | 98.7k | } |
2548 | | |
2549 | | static llvm::Constant *EmitFunctionDeclPointer(CodeGenModule &CGM, |
2550 | 158k | GlobalDecl GD) { |
2551 | 158k | const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl()); |
2552 | 158k | if (FD->hasAttr<WeakRefAttr>()) { |
2553 | 11 | ConstantAddress aliasee = CGM.GetWeakRefReference(FD); |
2554 | 11 | return aliasee.getPointer(); |
2555 | 11 | } |
2556 | | |
2557 | 158k | llvm::Constant *V = CGM.GetAddrOfFunction(GD); |
2558 | 158k | if (!FD->hasPrototype()) { |
2559 | 827 | if (const FunctionProtoType *Proto = |
2560 | 10 | FD->getType()->getAs<FunctionProtoType>()) { |
2561 | | // Ugly case: for a K&R-style definition, the type of the definition |
2562 | | // isn't the same as the type of a use. Correct for this with a |
2563 | | // bitcast. |
2564 | 10 | QualType NoProtoType = |
2565 | 10 | CGM.getContext().getFunctionNoProtoType(Proto->getReturnType()); |
2566 | 10 | NoProtoType = CGM.getContext().getPointerType(NoProtoType); |
2567 | 10 | V = llvm::ConstantExpr::getBitCast(V, |
2568 | 10 | CGM.getTypes().ConvertType(NoProtoType)); |
2569 | 10 | } |
2570 | 827 | } |
2571 | 158k | return V; |
2572 | 158k | } |
2573 | | |
2574 | | static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E, |
2575 | 3.14k | GlobalDecl GD) { |
2576 | 3.14k | const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl()); |
2577 | 3.14k | llvm::Value *V = EmitFunctionDeclPointer(CGF.CGM, GD); |
2578 | 3.14k | CharUnits Alignment = CGF.getContext().getDeclAlign(FD); |
2579 | 3.14k | return CGF.MakeAddrLValue(V, E->getType(), Alignment, |
2580 | 3.14k | AlignmentSource::Decl); |
2581 | 3.14k | } |
2582 | | |
2583 | | static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD, |
2584 | 4.72k | llvm::Value *ThisValue) { |
2585 | 4.72k | QualType TagType = CGF.getContext().getTagDeclType(FD->getParent()); |
2586 | 4.72k | LValue LV = CGF.MakeNaturalAlignAddrLValue(ThisValue, TagType); |
2587 | 4.72k | return CGF.EmitLValueForField(LV, FD); |
2588 | 4.72k | } |
2589 | | |
2590 | | /// Named registers are named metadata pointing to the register name
2591 | | /// which will be read from/written to as an argument to the intrinsic |
2592 | | /// @llvm.read/write_register. |
2593 | | /// So far, only the name is being passed down, but other options such as |
2594 | | /// register type, allocation type or even optimization options could be |
2595 | | /// passed down via the metadata node. |
2596 | 38 | static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) { |
2597 | 38 | SmallString<64> Name("llvm.named.register."); |
2598 | 38 | AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>(); |
2599 | 38 | assert(Asm->getLabel().size() < 64-Name.size() && |
2600 | 38 | "Register name too big"); |
2601 | 38 | Name.append(Asm->getLabel()); |
2602 | 38 | llvm::NamedMDNode *M = |
2603 | 38 | CGM.getModule().getOrInsertNamedMetadata(Name); |
2604 | 38 | if (M->getNumOperands() == 0) { |
2605 | 19 | llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(), |
2606 | 19 | Asm->getLabel()); |
2607 | 19 | llvm::Metadata *Ops[] = {Str}; |
2608 | 19 | M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops)); |
2609 | 19 | } |
2610 | | |
2611 | 38 | CharUnits Alignment = CGM.getContext().getDeclAlign(VD); |
2612 | | |
2613 | 38 | llvm::Value *Ptr = |
2614 | 38 | llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0)); |
2615 | 38 | return LValue::MakeGlobalReg(Address(Ptr, Alignment), VD->getType()); |
2616 | 38 | } |
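 | |
 | | // Illustrative example: for the declaration
 | | //
 | | //   register unsigned long current_sp __asm__("sp");
 | | //
 | | // this creates (if not already present) the module-level named metadata
 | | //
 | | //   !llvm.named.register.sp = !{!0}
 | | //   !0 = !{!"sp"}
 | | //
 | | // and the lvalue's "address" is the MetadataAsValue wrapping !0.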
2617 | | |
2618 | | /// Determine whether we can emit a reference to \p VD from the current |
2619 | | /// context, despite not necessarily having seen an odr-use of the variable in |
2620 | | /// this context. |
2621 | | static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF, |
2622 | | const DeclRefExpr *E, |
2623 | | const VarDecl *VD, |
2624 | 12 | bool IsConstant) { |
2625 | | // For a variable declared in an enclosing scope, do not emit a spurious |
2626 | | // reference even if we have a capture, as that will emit an unwarranted |
2627 | | // reference to our capture state, and will likely generate worse code than |
2628 | | // emitting a local copy. |
2629 | 12 | if (E->refersToEnclosingVariableOrCapture()) |
2630 | 0 | return false; |
2631 | | |
2632 | | // For a local declaration declared in this function, we can always reference |
2633 | | // it even if we don't have an odr-use. |
2634 | 12 | if (VD->hasLocalStorage()) { |
2635 | 8 | return VD->getDeclContext() == |
2636 | 8 | dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl); |
2637 | 8 | } |
2638 | | |
2639 | | // For a global declaration, we can emit a reference to it if we know |
2640 | | // for sure that we are able to emit a definition of it. |
2641 | 4 | VD = VD->getDefinition(CGF.getContext()); |
2642 | 4 | if (!VD) |
2643 | 1 | return false; |
2644 | | |
2645 | | // Don't emit a spurious reference if it might be to a variable that only |
2646 | | // exists on a different device / target. |
2647 | | // FIXME: This is unnecessarily broad. Check whether this would actually be a |
2648 | | // cross-target reference. |
2649 | 3 | if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA2 || |
2650 | 2 | CGF.getLangOpts().OpenCL) { |
2651 | 1 | return false; |
2652 | 1 | } |
2653 | | |
2654 | | // We can emit a spurious reference only if the linkage implies that we'll |
2655 | | // be emitting a non-interposable symbol that will be retained until link |
2656 | | // time. |
2657 | 2 | switch (CGF.CGM.getLLVMLinkageVarDefinition(VD, IsConstant)) { |
2658 | 0 | case llvm::GlobalValue::ExternalLinkage: |
2659 | 1 | case llvm::GlobalValue::LinkOnceODRLinkage: |
2660 | 1 | case llvm::GlobalValue::WeakODRLinkage: |
2661 | 2 | case llvm::GlobalValue::InternalLinkage: |
2662 | 2 | case llvm::GlobalValue::PrivateLinkage: |
2663 | 2 | return true; |
2664 | 0 | default: |
2665 | 0 | return false; |
2666 | 2 | } |
2667 | 2 | } |
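 | |
 | | // Illustrative C++ example:
 | | //
 | | //   const int N = 4;
 | | //   int f() { return N; }   // lvalue-to-rvalue of a constant: no odr-use
 | | //
 | | // the use of N is NOUR_Constant, so N's definition need not be emitted;
 | | // this predicate decides whether referencing the variable anyway (rather
 | | // than materializing the constant 4) would be safe.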
2668 | | |
2669 | 1.16M | LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) { |
2670 | 1.16M | const NamedDecl *ND = E->getDecl(); |
2671 | 1.16M | QualType T = E->getType(); |
2672 | | |
2673 | 1.16M | assert(E->isNonOdrUse() != NOUR_Unevaluated && |
2674 | 1.16M | "should not emit an unevaluated operand"); |
2675 | | |
2676 | 1.16M | if (const auto *VD = dyn_cast<VarDecl>(ND)) { |
2677 | | // Global named registers are accessed via intrinsics only.
2678 | 1.16M | if (VD->getStorageClass() == SC_Register && |
2679 | 709 | VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl()128 ) |
2680 | 38 | return EmitGlobalNamedRegister(VD, CGM); |
2681 | | |
2682 | | // If this DeclRefExpr does not constitute an odr-use of the variable, |
2683 | | // we're not permitted to emit a reference to it in general, and it might |
2684 | | // not be captured if capture would be necessary for a use. Emit the |
2685 | | // constant value directly instead. |
2686 | 1.16M | if (E->isNonOdrUse() == NOUR_Constant && |
2687 | 102 | (VD->getType()->isReferenceType() || |
2688 | 98 | !canEmitSpuriousReferenceToVariable(*this, E, VD, true)12 )) { |
2689 | 98 | VD->getAnyInitializer(VD); |
2690 | 98 | llvm::Constant *Val = ConstantEmitter(*this).emitAbstract( |
2691 | 98 | E->getLocation(), *VD->evaluateValue(), VD->getType()); |
2692 | 98 | assert(Val && "failed to emit constant expression"); |
2693 | | |
2694 | 98 | Address Addr = Address::invalid(); |
2695 | 98 | if (!VD->getType()->isReferenceType()) { |
2696 | | // Spill the constant value to a global. |
2697 | 8 | Addr = CGM.createUnnamedGlobalFrom(*VD, Val, |
2698 | 8 | getContext().getDeclAlign(VD)); |
2699 | 8 | llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType()); |
2700 | 8 | auto *PTy = llvm::PointerType::get( |
2701 | 8 | VarTy, getContext().getTargetAddressSpace(VD->getType())); |
2702 | 8 | if (PTy != Addr.getType()) |
2703 | 1 | Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy); |
2704 | 90 | } else { |
2705 | | // Should we be using the alignment of the constant pointer we emitted? |
2706 | 90 | CharUnits Alignment = |
2707 | 90 | CGM.getNaturalTypeAlignment(E->getType(), |
2708 | 90 | /* BaseInfo= */ nullptr, |
2709 | 90 | /* TBAAInfo= */ nullptr, |
2710 | 90 | /* forPointeeType= */ true); |
2711 | 90 | Addr = Address(Val, Alignment); |
2712 | 90 | } |
2713 | 98 | return MakeAddrLValue(Addr, T, AlignmentSource::Decl); |
2714 | 98 | } |
2715 | | |
2716 | | // FIXME: Handle other kinds of non-odr-use DeclRefExprs. |
2717 | | |
2718 | | // Check for captured variables. |
2719 | 1.16M | if (E->refersToEnclosingVariableOrCapture()) { |
2720 | 105k | VD = VD->getCanonicalDecl(); |
2721 | 105k | if (auto *FD = LambdaCaptureFields.lookup(VD)) |
2722 | 3.76k | return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue); |
2723 | 102k | if (CapturedStmtInfo) { |
2724 | 96.9k | auto I = LocalDeclMap.find(VD); |
2725 | 96.9k | if (I != LocalDeclMap.end()) { |
2726 | 96.0k | LValue CapLVal; |
2727 | 96.0k | if (VD->getType()->isReferenceType()) |
2728 | 4.24k | CapLVal = EmitLoadOfReferenceLValue(I->second, VD->getType(), |
2729 | 4.24k | AlignmentSource::Decl); |
2730 | 91.7k | else |
2731 | 91.7k | CapLVal = MakeAddrLValue(I->second, T); |
2732 | | // Mark the lvalue as nontemporal if the variable is marked as
2733 | | // nontemporal in a simd context.
2734 | 96.0k | if (getLangOpts().OpenMP && |
2735 | 96.0k | CGM.getOpenMPRuntime().isNontemporalDecl(VD)) |
2736 | 182 | CapLVal.setNontemporal(/*Value=*/true); |
2737 | 96.0k | return CapLVal; |
2738 | 96.0k | } |
2739 | 957 | LValue CapLVal = |
2740 | 957 | EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD), |
2741 | 957 | CapturedStmtInfo->getContextValue()); |
2742 | 957 | CapLVal = MakeAddrLValue( |
2743 | 957 | Address(CapLVal.getPointer(*this), getContext().getDeclAlign(VD)), |
2744 | 957 | CapLVal.getType(), LValueBaseInfo(AlignmentSource::Decl), |
2745 | 957 | CapLVal.getTBAAInfo()); |
2746 | | // Mark the lvalue as nontemporal if the variable is marked as
2747 | | // nontemporal in a simd context.
2748 | 957 | if (getLangOpts().OpenMP && |
2749 | 906 | CGM.getOpenMPRuntime().isNontemporalDecl(VD)) |
2750 | 0 | CapLVal.setNontemporal(/*Value=*/true); |
2751 | 957 | return CapLVal; |
2752 | 957 | } |
2753 | | |
2754 | 5.14k | assert(isa<BlockDecl>(CurCodeDecl)); |
2755 | 5.14k | Address addr = GetAddrOfBlockDecl(VD); |
2756 | 5.14k | return MakeAddrLValue(addr, T, AlignmentSource::Decl); |
2757 | 5.14k | } |
2758 | 1.16M | } |
2759 | | |
2760 | | // FIXME: We should be able to assert this for FunctionDecls as well! |
2761 | | // FIXME: We should be able to assert this for all DeclRefExprs, not just |
2762 | | // those with a valid source location. |
2763 | 1.06M | assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() || |
2764 | 1.06M | !E->getLocation().isValid()) && |
2765 | 1.06M | "Should not use decl without marking it used!"); |
2766 | | |
2767 | 1.06M | if (ND->hasAttr<WeakRefAttr>()) { |
2768 | 20 | const auto *VD = cast<ValueDecl>(ND); |
2769 | 20 | ConstantAddress Aliasee = CGM.GetWeakRefReference(VD); |
2770 | 20 | return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl); |
2771 | 20 | } |
2772 | | |
2773 | 1.06M | if (const auto *VD = dyn_cast<VarDecl>(ND)) { |
2774 | | // Check if this is a global variable. |
2775 | 1.05M | if (VD->hasLinkage() || VD->isStaticDataMember()957k ) |
2776 | 99.0k | return EmitGlobalVarDeclLValue(*this, E, VD); |
2777 | | |
2778 | 957k | Address addr = Address::invalid(); |
2779 | | |
2780 | | // The variable should generally be present in the local decl map. |
2781 | 957k | auto iter = LocalDeclMap.find(VD); |
2782 | 957k | if (iter != LocalDeclMap.end()) { |
2783 | 957k | addr = iter->second; |
2784 | | |
2785 | | // Otherwise, it might be a static local we haven't emitted yet for
2786 | | // some reason; most likely because it's in an outer function.
2787 | 612 | } else if (VD->isStaticLocal()) { |
2788 | 612 | addr = Address(CGM.getOrCreateStaticVarDecl( |
2789 | 612 | *VD, CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false)), |
2790 | 612 | getContext().getDeclAlign(VD)); |
2791 | | |
2792 | | // No other cases for now. |
2793 | 0 | } else { |
2794 | 0 | llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?"); |
2795 | 0 | } |
2796 | | |
2797 | | |
2798 | | // Check for OpenMP threadprivate variables. |
2799 | 957k | if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd342k && |
2800 | 237k | VD->hasAttr<OMPThreadPrivateDeclAttr>()) { |
2801 | 110 | return EmitThreadPrivateVarDeclLValue( |
2802 | 110 | *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()), |
2803 | 110 | E->getExprLoc()); |
2804 | 110 | } |
2805 | | |
2806 | | // Drill into block byref variables. |
2807 | 957k | bool isBlockByref = VD->isEscapingByref(); |
2808 | 957k | if (isBlockByref) { |
2809 | 40 | addr = emitBlockByrefAddress(addr, VD); |
2810 | 40 | } |
2811 | | |
2812 | | // Drill into reference types. |
2813 | 957k | LValue LV = VD->getType()->isReferenceType() ? |
2814 | 48.4k | EmitLoadOfReferenceLValue(addr, VD->getType(), AlignmentSource::Decl) : |
2815 | 909k | MakeAddrLValue(addr, T, AlignmentSource::Decl); |
2816 | | |
2817 | 957k | bool isLocalStorage = VD->hasLocalStorage(); |
2818 | | |
2819 | 957k | bool NonGCable = isLocalStorage && |
2820 | 953k | !VD->getType()->isReferenceType() && |
2821 | 904k | !isBlockByref; |
2822 | 957k | if (NonGCable) { |
2823 | 904k | LV.getQuals().removeObjCGCAttr(); |
2824 | 904k | LV.setNonGC(true); |
2825 | 904k | } |
2826 | | |
2827 | 957k | bool isImpreciseLifetime = |
2828 | 957k | (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>()953k ); |
2829 | 957k | if (isImpreciseLifetime) |
2830 | 953k | LV.setARCPreciseLifetime(ARCImpreciseLifetime); |
2831 | 957k | setObjCGCLValueClass(getContext(), E, LV); |
2832 | 957k | return LV; |
2833 | 957k | } |
2834 | | |
2835 | 3.16k | if (const auto *FD = dyn_cast<FunctionDecl>(ND)) |
2836 | 3.14k | return EmitFunctionDeclLValue(*this, E, FD); |
2837 | | |
2838 | | // FIXME: While we're emitting a binding from an enclosing scope, all other |
2839 | | // DeclRefExprs we see should be implicitly treated as if they also refer to |
2840 | | // an enclosing scope. |
2841 | 15 | if (const auto *BD = dyn_cast<BindingDecl>(ND)) |
2842 | 8 | return EmitLValue(BD->getBinding()); |
2843 | | |
2844 | | // We can form DeclRefExprs naming GUID declarations when reconstituting |
2845 | | // non-type template parameters into expressions. |
2846 | 7 | if (const auto *GD = dyn_cast<MSGuidDecl>(ND)) |
2847 | 5 | return MakeAddrLValue(CGM.GetAddrOfMSGuidDecl(GD), T, |
2848 | 5 | AlignmentSource::Decl); |
2849 | | |
2850 | 2 | if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) |
2851 | 2 | return MakeAddrLValue(CGM.GetAddrOfTemplateParamObject(TPO), T, |
2852 | 2 | AlignmentSource::Decl); |
2853 | | |
2854 | 0 | llvm_unreachable("Unhandled DeclRefExpr"); |
2855 | 0 | } |
2856 | | |
2857 | 50.6k | LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) { |
2858 | | // __extension__ doesn't affect lvalue-ness. |
2859 | 50.6k | if (E->getOpcode() == UO_Extension) |
2860 | 0 | return EmitLValue(E->getSubExpr()); |
2861 | | |
2862 | 50.6k | QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType()); |
2863 | 50.6k | switch (E->getOpcode()) { |
2864 | 0 | default: llvm_unreachable("Unknown unary operator lvalue!"); |
2865 | 38.6k | case UO_Deref: { |
2866 | 38.6k | QualType T = E->getSubExpr()->getType()->getPointeeType(); |
2867 | 38.6k | assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type"); |
2868 | | |
2869 | 38.6k | LValueBaseInfo BaseInfo; |
2870 | 38.6k | TBAAAccessInfo TBAAInfo; |
2871 | 38.6k | Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &BaseInfo, |
2872 | 38.6k | &TBAAInfo); |
2873 | 38.6k | LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo); |
2874 | 38.6k | LV.getQuals().setAddressSpace(ExprTy.getAddressSpace()); |
2875 | | |
2876 | | // We should not generate a __weak write barrier on an indirect reference
2877 | | // to a pointer to object, as in: void foo(__weak id *param); *param = 0;
2878 | | // But we continue to generate a __strong write barrier on an indirect
2879 | | // write into a pointer to object.
2880 | 38.6k | if (getLangOpts().ObjC && |
2881 | 9.73k | getLangOpts().getGC() != LangOptions::NonGC && |
2882 | 44 | LV.isObjCWeak()) |
2883 | 10 | LV.setNonGC(!E->isOBJCGCCandidate(getContext())); |
2884 | 38.6k | return LV; |
2885 | 0 | } |
2886 | 110 | case UO_Real: |
2887 | 219 | case UO_Imag: { |
2888 | 219 | LValue LV = EmitLValue(E->getSubExpr()); |
2889 | 219 | assert(LV.isSimple() && "real/imag on non-ordinary l-value"); |
2890 | | |
2891 | | // __real is valid on scalars. This is a faster way of testing that. |
2892 | | // __imag can only produce an rvalue on scalars. |
2893 | 219 | if (E->getOpcode() == UO_Real && |
2894 | 110 | !LV.getAddress(*this).getElementType()->isStructTy()) { |
2895 | 4 | assert(E->getSubExpr()->getType()->isArithmeticType()); |
2896 | 4 | return LV; |
2897 | 4 | } |
2898 | | |
2899 | 215 | QualType T = ExprTy->castAs<ComplexType>()->getElementType(); |
2900 | | |
2901 | 215 | Address Component = |
2902 | 215 | (E->getOpcode() == UO_Real |
2903 | 106 | ? emitAddrOfRealComponent(LV.getAddress(*this), LV.getType()) |
2904 | 109 | : emitAddrOfImagComponent(LV.getAddress(*this), LV.getType())); |
2905 | 215 | LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(), |
2906 | 215 | CGM.getTBAAInfoForSubobject(LV, T)); |
2907 | 215 | ElemLV.getQuals().addQualifiers(LV.getQuals()); |
2908 | 215 | return ElemLV; |
2909 | 215 | } |
2910 | 11.2k | case UO_PreInc: |
2911 | 11.8k | case UO_PreDec: { |
2912 | 11.8k | LValue LV = EmitLValue(E->getSubExpr()); |
2913 | 11.8k | bool isInc = E->getOpcode() == UO_PreInc; |
2914 | | |
2915 | 11.8k | if (E->getType()->isAnyComplexType()) |
2916 | 0 | EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/); |
2917 | 11.8k | else |
2918 | 11.8k | EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/); |
2919 | 11.8k | return LV; |
2920 | 11.2k | } |
2921 | 50.6k | } |
2922 | 50.6k | } |
2923 | | |
2924 | 49.5k | LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) { |
2925 | 49.5k | return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E), |
2926 | 49.5k | E->getType(), AlignmentSource::Decl); |
2927 | 49.5k | } |
2928 | | |
2929 | 16 | LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) { |
2930 | 16 | return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E), |
2931 | 16 | E->getType(), AlignmentSource::Decl); |
2932 | 16 | } |
2933 | | |
2934 | 514 | LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) { |
2935 | 514 | auto SL = E->getFunctionName(); |
2936 | 514 | assert(SL != nullptr && "No StringLiteral name in PredefinedExpr"); |
2937 | 514 | StringRef FnName = CurFn->getName(); |
2938 | 514 | if (FnName.startswith("\01")) |
2939 | 84 | FnName = FnName.substr(1); |
2940 | 514 | StringRef NameItems[] = { |
2941 | 514 | PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName}; |
2942 | 514 | std::string GVName = llvm::join(NameItems, NameItems + 2, "."); |
2943 | 514 | if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) { |
2944 | 28 | std::string Name = std::string(SL->getString()); |
2945 | 28 | if (!Name.empty()) { |
2946 | 26 | unsigned Discriminator = |
2947 | 26 | CGM.getCXXABI().getMangleContext().getBlockId(BD, true); |
2948 | 26 | if (Discriminator) |
2949 | 13 | Name += "_" + Twine(Discriminator + 1).str(); |
2950 | 26 | auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str()); |
2951 | 26 | return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl); |
2952 | 2 | } else { |
2953 | 2 | auto C = |
2954 | 2 | CGM.GetAddrOfConstantCString(std::string(FnName), GVName.c_str()); |
2955 | 2 | return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl); |
2956 | 2 | } |
2957 | 486 | } |
2958 | 486 | auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName); |
2959 | 486 | return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl); |
2960 | 486 | } |
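 | |
 | | // Illustrative example: in
 | | //
 | | //   void foo(void) { const char *s = __func__; }
 | | //
 | | // the ident-kind name is "__func__" and FnName is "foo", so the backing
 | | // global is named "__func__.foo"; inside a block, the stored string may
 | | // additionally get a "_<N>" block discriminator suffix.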
2961 | | |
2962 | | /// Emit a type description suitable for use by a runtime sanitizer library. The |
2963 | | /// format of a type descriptor is |
2964 | | /// |
2965 | | /// \code |
2966 | | /// { i16 TypeKind, i16 TypeInfo } |
2967 | | /// \endcode |
2968 | | /// |
2969 | | /// followed by an array of i8 containing the type name. TypeKind is 0 for an |
2970 | | /// integer, 1 for a floating point value, and -1 for anything else. |
2971 | 2.66k | llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) { |
2972 | | // Only emit each type's descriptor once. |
2973 | 2.66k | if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T)) |
2974 | 2.11k | return C; |
2975 | | |
2976 | 544 | uint16_t TypeKind = -1; |
2977 | 544 | uint16_t TypeInfo = 0; |
2978 | | |
2979 | 544 | if (T->isIntegerType()) { |
2980 | 268 | TypeKind = 0; |
2981 | 268 | TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) | |
2982 | 138 | (T->isSignedIntegerType() ? 1 : 0130 ); |
2983 | 276 | } else if (T->isFloatingType()) { |
2984 | 10 | TypeKind = 1; |
2985 | 10 | TypeInfo = getContext().getTypeSize(T); |
2986 | 10 | } |
2987 | | |
2988 | | // Format the type name as if for a diagnostic, including quotes and |
2989 | | // optionally an 'aka'. |
2990 | 544 | SmallString<32> Buffer; |
2991 | 544 | CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype, |
2992 | 544 | (intptr_t)T.getAsOpaquePtr(), |
2993 | 544 | StringRef(), StringRef(), None, Buffer, |
2994 | 544 | None); |
2995 | | |
2996 | 544 | llvm::Constant *Components[] = { |
2997 | 544 | Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo), |
2998 | 544 | llvm::ConstantDataArray::getString(getLLVMContext(), Buffer) |
2999 | 544 | }; |
3000 | 544 | llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components); |
3001 | | |
3002 | 544 | auto *GV = new llvm::GlobalVariable( |
3003 | 544 | CGM.getModule(), Descriptor->getType(), |
3004 | 544 | /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor); |
3005 | 544 | GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
3006 | 544 | CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV); |
3007 | | |
3008 | | // Remember the descriptor for this type. |
3009 | 544 | CGM.setTypeDescriptorInMap(T, GV); |
3010 | | |
3011 | 544 | return GV; |
3012 | 544 | } |
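 | |
 | | // Illustrative example (assuming a 32-bit 'int'): the descriptor emitted
 | | // for 'int' is
 | | //
 | | //   { i16 0, i16 11, [6 x i8] c"'int'\00" }
 | | //
 | | // TypeKind 0 marks an integer; TypeInfo 11 == (log2(32) << 1) | 1 encodes
 | | // a 32-bit signed integer; the name is rendered as in a diagnostic,
 | | // quotes included.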
3013 | | |
3014 | 2.66k | llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) { |
3015 | 2.66k | llvm::Type *TargetTy = IntPtrTy; |
3016 | | |
3017 | 2.66k | if (V->getType() == TargetTy) |
3018 | 636 | return V; |
3019 | | |
3020 | | // Floating-point types which fit into intptr_t are bitcast to integers |
3021 | | // and then passed directly (after zero-extension, if necessary). |
3022 | 2.02k | if (V->getType()->isFloatingPointTy()) { |
3023 | 12 | unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedSize(); |
3024 | 12 | if (Bits <= TargetTy->getIntegerBitWidth()) |
3025 | 11 | V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(), |
3026 | 11 | Bits)); |
3027 | 12 | } |
3028 | | |
3029 | | // Integers which fit in intptr_t are zero-extended and passed directly. |
3030 | 2.02k | if (V->getType()->isIntegerTy() && |
3031 | 1.65k | V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth()) |
3032 | 1.65k | return Builder.CreateZExt(V, TargetTy); |
3033 | | |
3034 | | // Pointers are passed directly, everything else is passed by address. |
3035 | 379 | if (!V->getType()->isPointerTy()) { |
3036 | 3 | Address Ptr = CreateDefaultAlignTempAlloca(V->getType()); |
3037 | 3 | Builder.CreateStore(V, Ptr); |
3038 | 3 | V = Ptr.getPointer(); |
3039 | 3 | } |
3040 | 379 | return Builder.CreatePtrToInt(V, TargetTy); |
3041 | 379 | } |
3042 | | |
3043 | | /// Emit a representation of a SourceLocation for passing to a handler |
3044 | | /// in a sanitizer runtime library. The format for this data is: |
3045 | | /// \code |
3046 | | /// struct SourceLocation { |
3047 | | /// const char *Filename; |
3048 | | /// int32_t Line, Column; |
3049 | | /// }; |
3050 | | /// \endcode |
3051 | | /// For an invalid SourceLocation, the Filename pointer is null. |
3052 | 1.99k | llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) { |
3053 | 1.99k | llvm::Constant *Filename; |
3054 | 1.99k | int Line, Column; |
3055 | | |
3056 | 1.99k | PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc); |
3057 | 1.99k | if (PLoc.isValid()) { |
3058 | 1.97k | StringRef FilenameString = PLoc.getFilename(); |
3059 | | |
3060 | 1.97k | int PathComponentsToStrip = |
3061 | 1.97k | CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip; |
3062 | 1.97k | if (PathComponentsToStrip < 0) { |
3063 | 3 | assert(PathComponentsToStrip != INT_MIN); |
3064 | 3 | int PathComponentsToKeep = -PathComponentsToStrip; |
3065 | 3 | auto I = llvm::sys::path::rbegin(FilenameString); |
3066 | 3 | auto E = llvm::sys::path::rend(FilenameString); |
3067 | 15 | while (I != E && --PathComponentsToKeep14 ) |
3068 | 12 | ++I; |
3069 | | |
3070 | 3 | FilenameString = FilenameString.substr(I - E); |
3071 | 1.96k | } else if (PathComponentsToStrip > 0) { |
3072 | 2 | auto I = llvm::sys::path::begin(FilenameString); |
3073 | 2 | auto E = llvm::sys::path::end(FilenameString); |
3074 | 15 | while (I != E && PathComponentsToStrip--14 ) |
3075 | 13 | ++I; |
3076 | | |
3077 | 2 | if (I != E) |
3078 | 1 | FilenameString = |
3079 | 1 | FilenameString.substr(I - llvm::sys::path::begin(FilenameString)); |
3080 | 1 | else |
3081 | 1 | FilenameString = llvm::sys::path::filename(FilenameString); |
3082 | 2 | } |
3083 | | |
3084 | 1.97k | auto FilenameGV = |
3085 | 1.97k | CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src"); |
3086 | 1.97k | CGM.getSanitizerMetadata()->disableSanitizerForGlobal( |
3087 | 1.97k | cast<llvm::GlobalVariable>(FilenameGV.getPointer())); |
3088 | 1.97k | Filename = FilenameGV.getPointer(); |
3089 | 1.97k | Line = PLoc.getLine(); |
3090 | 1.97k | Column = PLoc.getColumn(); |
3091 | 24 | } else { |
3092 | 24 | Filename = llvm::Constant::getNullValue(Int8PtrTy); |
3093 | 24 | Line = Column = 0; |
3094 | 24 | } |
3095 | | |
3096 | 1.99k | llvm::Constant *Data[] = {Filename, Builder.getInt32(Line), |
3097 | 1.99k | Builder.getInt32(Column)}; |
3098 | | |
3099 | 1.99k | return llvm::ConstantStruct::getAnon(Data); |
3100 | 1.99k | } |
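 | |
 | | // Illustrative example: a check at foo.c:7:12 produces the anonymous
 | | // constant
 | | //
 | | //   { i8* <@.src pointing at "foo.c">, i32 7, i32 12 }
 | | //
 | | // with the filename global excluded from sanitization.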
3101 | | |
3102 | | namespace { |
3103 | | /// Specify under what conditions this check can be recovered from.
3104 | | enum class CheckRecoverableKind { |
3105 | | /// Always terminate program execution if this check fails. |
3106 | | Unrecoverable, |
3107 | | /// Check supports recovering, runtime has both fatal (noreturn) and |
3108 | | /// non-fatal handlers for this check. |
3109 | | Recoverable, |
3110 | | /// Runtime conditionally aborts, always need to support recovery. |
3111 | | AlwaysRecoverable |
3112 | | }; |
3113 | | } |
3114 | | |
3115 | 1.77k | static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) { |
3116 | 1.77k | assert(Kind.countPopulation() == 1); |
3117 | 1.77k | if (Kind == SanitizerKind::Function || Kind == SanitizerKind::Vptr1.77k ) |
3118 | 57 | return CheckRecoverableKind::AlwaysRecoverable; |
3119 | 1.72k | else if (Kind == SanitizerKind::Return || Kind == SanitizerKind::Unreachable1.72k ) |
3120 | 19 | return CheckRecoverableKind::Unrecoverable; |
3121 | 1.70k | else |
3122 | 1.70k | return CheckRecoverableKind::Recoverable; |
3123 | 1.77k | } |
3124 | | |
3125 | | namespace { |
3126 | | struct SanitizerHandlerInfo { |
3127 | | char const *const Name; |
3128 | | unsigned Version; |
3129 | | }; |
3130 | | } |
3131 | | |
3132 | | const SanitizerHandlerInfo SanitizerHandlers[] = { |
3133 | | #define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version}, |
3134 | | LIST_SANITIZER_CHECKS |
3135 | | #undef SANITIZER_CHECK |
3136 | | }; |
3137 | | |
3138 | | static void emitCheckHandlerCall(CodeGenFunction &CGF, |
3139 | | llvm::FunctionType *FnType, |
3140 | | ArrayRef<llvm::Value *> FnArgs, |
3141 | | SanitizerHandler CheckHandler, |
3142 | | CheckRecoverableKind RecoverKind, bool IsFatal, |
3143 | 1.51k | llvm::BasicBlock *ContBB) { |
3144 | 1.51k | assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable); |
3145 | 1.51k | Optional<ApplyDebugLocation> DL; |
3146 | 1.51k | if (!CGF.Builder.getCurrentDebugLocation()) { |
3147 | | // Ensure that the call has at least an artificial debug location. |
3148 | 1.51k | DL.emplace(CGF, SourceLocation()); |
3149 | 1.51k | } |
3150 | 1.51k | bool NeedsAbortSuffix = |
3151 | 1.51k | IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable822 ; |
3152 | 1.51k | bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime; |
3153 | 1.51k | const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler]; |
3154 | 1.51k | const StringRef CheckName = CheckInfo.Name; |
3155 | 1.51k | std::string FnName = "__ubsan_handle_" + CheckName.str(); |
3156 | 1.51k | if (CheckInfo.Version && !MinimalRuntime370 ) |
3157 | 370 | FnName += "_v" + llvm::utostr(CheckInfo.Version); |
3158 | 1.51k | if (MinimalRuntime) |
3159 | 3 | FnName += "_minimal"; |
3160 | 1.51k | if (NeedsAbortSuffix) |
3161 | 803 | FnName += "_abort"; |
3162 | 1.51k | bool MayReturn = |
3163 | 1.51k | !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable822 ; |
3164 | | |
3165 | 1.51k | llvm::AttrBuilder B; |
3166 | 1.51k | if (!MayReturn) { |
3167 | 798 | B.addAttribute(llvm::Attribute::NoReturn) |
3168 | 798 | .addAttribute(llvm::Attribute::NoUnwind); |
3169 | 798 | } |
3170 | 1.51k | B.addAttribute(llvm::Attribute::UWTable); |
3171 | | |
3172 | 1.51k | llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction( |
3173 | 1.51k | FnType, FnName, |
3174 | 1.51k | llvm::AttributeList::get(CGF.getLLVMContext(), |
3175 | 1.51k | llvm::AttributeList::FunctionIndex, B), |
3176 | 1.51k | /*Local=*/true); |
3177 | 1.51k | llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs); |
3178 | 1.51k | if (!MayReturn) { |
3179 | 798 | HandlerCall->setDoesNotReturn(); |
3180 | 798 | CGF.Builder.CreateUnreachable(); |
3181 | 719 | } else { |
3182 | 719 | CGF.Builder.CreateBr(ContBB); |
3183 | 719 | } |
3184 | 1.51k | } |
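 | |
 | | // Illustrative example: for the versioned type_mismatch check compiled as
 | | // fatal under the full (non-minimal) runtime, the handler name assembled
 | | // above is "__ubsan_handle_type_mismatch_v1_abort".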
3185 | | |
3186 | | void CodeGenFunction::EmitCheck( |
3187 | | ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked, |
3188 | | SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs, |
3189 | 1.90k | ArrayRef<llvm::Value *> DynamicArgs) { |
3190 | 1.90k | assert(IsSanitizerScope); |
3191 | 1.90k | assert(Checked.size() > 0); |
3192 | 1.90k | assert(CheckHandler >= 0 && |
3193 | 1.90k | size_t(CheckHandler) < llvm::array_lengthof(SanitizerHandlers)); |
3194 | 1.90k | const StringRef CheckName = SanitizerHandlers[CheckHandler].Name; |
3195 | | |
3196 | 1.90k | llvm::Value *FatalCond = nullptr; |
3197 | 1.90k | llvm::Value *RecoverableCond = nullptr; |
3198 | 1.90k | llvm::Value *TrapCond = nullptr; |
3199 | 4.12k | for (int i = 0, n = Checked.size(); i < n; ++i2.22k ) { |
3200 | 2.22k | llvm::Value *Check = Checked[i].first; |
3201 | | // -fsanitize-trap= overrides -fsanitize-recover=. |
3202 | 2.22k | llvm::Value *&Cond = |
3203 | 2.22k | CGM.getCodeGenOpts().SanitizeTrap.has(Checked[i].second) |
3204 | 446 | ? TrapCond |
3205 | 1.77k | : CGM.getCodeGenOpts().SanitizeRecover.has(Checked[i].second) |
3206 | 838 | ? RecoverableCond |
3207 | 939 | : FatalCond; |
3208 | 1.90k | Cond = Cond ? Builder.CreateAnd(Cond, Check)317 : Check; |
3209 | 2.22k | } |
3210 | | |
3211 | 1.90k | if (TrapCond) |
3212 | 389 | EmitTrapCheck(TrapCond, CheckHandler); |
3213 | 1.90k | if (!FatalCond && !RecoverableCond1.08k ) |
3214 | 388 | return; |
3215 | | |
3216 | 1.51k | llvm::Value *JointCond; |
3217 | 1.51k | if (FatalCond && RecoverableCond822 ) |
3218 | 0 | JointCond = Builder.CreateAnd(FatalCond, RecoverableCond); |
3219 | 1.51k | else |
3220 | 1.51k | JointCond = FatalCond ? FatalCond822 : RecoverableCond695 ; |
3221 | 1.51k | assert(JointCond); |
3222 | | |
3223 | 1.51k | CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second); |
3224 | 1.51k | assert(SanOpts.has(Checked[0].second)); |
3225 | 1.51k | #ifndef NDEBUG |
3226 | 1.77k | for (int i = 1, n = Checked.size(); i < n; ++i261 ) { |
3227 | 261 | assert(RecoverKind == getRecoverableKind(Checked[i].second) && |
3228 | 261 | "All recoverable kinds in a single check must be same!"); |
3229 | 261 | assert(SanOpts.has(Checked[i].second)); |
3230 | 261 | } |
3231 | 1.51k | #endif |
3232 | | |
3233 | 1.51k | llvm::BasicBlock *Cont = createBasicBlock("cont"); |
3234 | 1.51k | llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName); |
3235 | 1.51k | llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers); |
3236 | | // Give a hint that we very much don't expect to execute the handler.
3237 | | // The value is chosen to match UR_NONTAKEN_WEIGHT; see BranchProbabilityInfo.cpp.
3238 | 1.51k | llvm::MDBuilder MDHelper(getLLVMContext()); |
3239 | 1.51k | llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1); |
3240 | 1.51k | Branch->setMetadata(llvm::LLVMContext::MD_prof, Node); |
3241 | 1.51k | EmitBlock(Handlers); |
3242 | | |
3243 | | // Handler functions take an i8* pointing to the (handler-specific) static |
3244 | | // information block, followed by a sequence of intptr_t arguments |
3245 | | // representing operand values. |
3246 | 1.51k | SmallVector<llvm::Value *, 4> Args; |
3247 | 1.51k | SmallVector<llvm::Type *, 4> ArgTypes; |
3248 | 1.51k | if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) { |
3249 | 1.51k | Args.reserve(DynamicArgs.size() + 1); |
3250 | 1.51k | ArgTypes.reserve(DynamicArgs.size() + 1); |
3251 | | |
3252 | | // Emit handler arguments and create handler function type. |
3253 | 1.51k | if (!StaticArgs.empty()) { |
3254 | 1.50k | llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs); |
3255 | 1.50k | auto *InfoPtr = |
3256 | 1.50k | new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false, |
3257 | 1.50k | llvm::GlobalVariable::PrivateLinkage, Info); |
3258 | 1.50k | InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
3259 | 1.50k | CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr); |
3260 | 1.50k | Args.push_back(Builder.CreateBitCast(InfoPtr, Int8PtrTy)); |
3261 | 1.50k | ArgTypes.push_back(Int8PtrTy); |
3262 | 1.50k | } |
3263 | | |
3264 | 4.06k | for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i2.54k ) { |
3265 | 2.54k | Args.push_back(EmitCheckValue(DynamicArgs[i])); |
3266 | 2.54k | ArgTypes.push_back(IntPtrTy); |
3267 | 2.54k | } |
3268 | 1.51k | } |
3269 | | |
3270 | 1.51k | llvm::FunctionType *FnType = |
3271 | 1.51k | llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false); |
3272 | | |
3273 | 1.51k | if (!FatalCond || !RecoverableCond822 ) { |
3274 | | // Simple case: we need to generate a single handler call, either
3275 | | // fatal or non-fatal.
3276 | 1.51k | emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, |
3277 | 1.51k | (FatalCond != nullptr), Cont); |
3278 | 0 | } else { |
3279 | | // Emit two handler calls: the first one for the set of unrecoverable
3280 | | // checks, another one for the recoverable ones.
3281 | 0 | llvm::BasicBlock *NonFatalHandlerBB = |
3282 | 0 | createBasicBlock("non_fatal." + CheckName); |
3283 | 0 | llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName); |
3284 | 0 | Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB); |
3285 | 0 | EmitBlock(FatalHandlerBB); |
3286 | 0 | emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true, |
3287 | 0 | NonFatalHandlerBB); |
3288 | 0 | EmitBlock(NonFatalHandlerBB); |
3289 | 0 | emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false, |
3290 | 0 | Cont); |
3291 | 0 | } |
3292 | | |
3293 | 1.51k | EmitBlock(Cont); |
3294 | 1.51k | } |
3295 | | |
3296 | | void CodeGenFunction::EmitCfiSlowPathCheck( |
3297 | | SanitizerMask Kind, llvm::Value *Cond, llvm::ConstantInt *TypeId, |
3298 | 7 | llvm::Value *Ptr, ArrayRef<llvm::Constant *> StaticArgs) { |
3299 | 7 | llvm::BasicBlock *Cont = createBasicBlock("cfi.cont"); |
3300 | | |
3301 | 7 | llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath"); |
3302 | 7 | llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB); |
3303 | | |
3304 | 7 | llvm::MDBuilder MDHelper(getLLVMContext()); |
3305 | 7 | llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1); |
3306 | 7 | BI->setMetadata(llvm::LLVMContext::MD_prof, Node); |
3307 | | |
3308 | 7 | EmitBlock(CheckBB); |
3309 | | |
3310 | 7 | bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Kind); |
3311 | | |
3312 | 7 | llvm::CallInst *CheckCall; |
3313 | 7 | llvm::FunctionCallee SlowPathFn; |
3314 | 7 | if (WithDiag) { |
3315 | 4 | llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs); |
3316 | 4 | auto *InfoPtr = |
3317 | 4 | new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false, |
3318 | 4 | llvm::GlobalVariable::PrivateLinkage, Info); |
3319 | 4 | InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
3320 | 4 | CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr); |
3321 | | |
3322 | 4 | SlowPathFn = CGM.getModule().getOrInsertFunction( |
3323 | 4 | "__cfi_slowpath_diag", |
3324 | 4 | llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy}, |
3325 | 4 | false)); |
3326 | 4 | CheckCall = Builder.CreateCall( |
3327 | 4 | SlowPathFn, {TypeId, Ptr, Builder.CreateBitCast(InfoPtr, Int8PtrTy)}); |
3328 | 3 | } else { |
3329 | 3 | SlowPathFn = CGM.getModule().getOrInsertFunction( |
3330 | 3 | "__cfi_slowpath", |
3331 | 3 | llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false)); |
3332 | 3 | CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr}); |
3333 | 3 | } |
3334 | | |
3335 | 7 | CGM.setDSOLocal( |
3336 | 7 | cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts())); |
3337 | 7 | CheckCall->setDoesNotThrow(); |
3338 | | |
3339 | 7 | EmitBlock(Cont); |
3340 | 7 | } |
3341 | | |
3342 | | // Emit a stub for the __cfi_check function so that the linker knows about
3343 | | // this symbol in LTO mode.
3344 | 14 | void CodeGenFunction::EmitCfiCheckStub() { |
3345 | 14 | llvm::Module *M = &CGM.getModule(); |
3346 | 14 | auto &Ctx = M->getContext(); |
3347 | 14 | llvm::Function *F = llvm::Function::Create( |
3348 | 14 | llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy}, false), |
3349 | 14 | llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M); |
3350 | 14 | CGM.setDSOLocal(F); |
3351 | 14 | llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F); |
3352 | | // FIXME: consider emitting an intrinsic call like |
3353 | | // call void @llvm.cfi_check(i64 %0, i8* %1, i8* %2) |
3354 | | // which can be lowered in CrossDSOCFI pass to the actual contents of |
3355 | | // __cfi_check. This would allow inlining of __cfi_check calls. |
3356 | 14 | llvm::CallInst::Create( |
3357 | 14 | llvm::Intrinsic::getDeclaration(M, llvm::Intrinsic::trap), "", BB); |
3358 | 14 | llvm::ReturnInst::Create(Ctx, nullptr, BB); |
3359 | 14 | } |
3360 | | |
3361 | | // This function is basically a switch over the CFI failure kind, which is |
3362 | | // extracted from CFICheckFailData (the first function argument). Each case
3363 | | // is either llvm.trap or a call to one of the two runtime handlers, based
3364 | | // on the -fsanitize-trap and -fsanitize-recover settings. The default case
3365 | | // (an invalid failure kind) traps, but this should really never happen.
3366 | | // CFICheckFailData can be nullptr if the calling module has -fsanitize-trap
3367 | | // behavior for this check kind; in that case __cfi_check_fail traps as well.
3368 | 14 | void CodeGenFunction::EmitCfiCheckFail() { |
3369 | 14 | SanitizerScope SanScope(this); |
3370 | 14 | FunctionArgList Args; |
3371 | 14 | ImplicitParamDecl ArgData(getContext(), getContext().VoidPtrTy, |
3372 | 14 | ImplicitParamDecl::Other); |
3373 | 14 | ImplicitParamDecl ArgAddr(getContext(), getContext().VoidPtrTy, |
3374 | 14 | ImplicitParamDecl::Other); |
3375 | 14 | Args.push_back(&ArgData); |
3376 | 14 | Args.push_back(&ArgAddr); |
3377 | | |
3378 | 14 | const CGFunctionInfo &FI = |
3379 | 14 | CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, Args); |
3380 | | |
3381 | 14 | llvm::Function *F = llvm::Function::Create( |
3382 | 14 | llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false), |
3383 | 14 | llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule()); |
3384 | | |
3385 | 14 | CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F); |
3386 | 14 | CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F); |
3387 | 14 | F->setVisibility(llvm::GlobalValue::HiddenVisibility); |
3388 | | |
3389 | 14 | StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args, |
3390 | 14 | SourceLocation()); |
3391 | | |
3392 | | // This function should not be affected by the blacklist. It does not
3393 | | // have a source location, but "src:*" would still apply. Revert any
3394 | | // changes to SanOpts made in StartFunction.
3395 | 14 | SanOpts = CGM.getLangOpts().Sanitize; |
3396 | | |
3397 | 14 | llvm::Value *Data = |
3398 | 14 | EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false, |
3399 | 14 | CGM.getContext().VoidPtrTy, ArgData.getLocation()); |
3400 | 14 | llvm::Value *Addr = |
3401 | 14 | EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false, |
3402 | 14 | CGM.getContext().VoidPtrTy, ArgAddr.getLocation()); |
3403 | | |
3404 | | // Data == nullptr means the calling module has trap behavior for this check.
3405 | 14 | llvm::Value *DataIsNotNullPtr = |
3406 | 14 | Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy)); |
3407 | 14 | EmitTrapCheck(DataIsNotNullPtr, SanitizerHandler::CFICheckFail); |
3408 | | |
3409 | 14 | llvm::StructType *SourceLocationTy = |
3410 | 14 | llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty); |
3411 | 14 | llvm::StructType *CfiCheckFailDataTy = |
3412 | 14 | llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy); |
3413 | | |
3414 | 14 | llvm::Value *V = Builder.CreateConstGEP2_32( |
3415 | 14 | CfiCheckFailDataTy, |
3416 | 14 | Builder.CreatePointerCast(Data, CfiCheckFailDataTy->getPointerTo(0)), 0, |
3417 | 14 | 0); |
3418 | 14 | Address CheckKindAddr(V, getIntAlign()); |
3419 | 14 | llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr); |
3420 | | |
3421 | 14 | llvm::Value *AllVtables = llvm::MetadataAsValue::get( |
3422 | 14 | CGM.getLLVMContext(), |
3423 | 14 | llvm::MDString::get(CGM.getLLVMContext(), "all-vtables")); |
3424 | 14 | llvm::Value *ValidVtable = Builder.CreateZExt( |
3425 | 14 | Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test), |
3426 | 14 | {Addr, AllVtables}), |
3427 | 14 | IntPtrTy); |
3428 | | |
3429 | 14 | const std::pair<int, SanitizerMask> CheckKinds[] = { |
3430 | 14 | {CFITCK_VCall, SanitizerKind::CFIVCall}, |
3431 | 14 | {CFITCK_NVCall, SanitizerKind::CFINVCall}, |
3432 | 14 | {CFITCK_DerivedCast, SanitizerKind::CFIDerivedCast}, |
3433 | 14 | {CFITCK_UnrelatedCast, SanitizerKind::CFIUnrelatedCast}, |
3434 | 14 | {CFITCK_ICall, SanitizerKind::CFIICall}}; |
3435 | | |
3436 | 14 | SmallVector<std::pair<llvm::Value *, SanitizerMask>, 5> Checks; |
3437 | 70 | for (auto CheckKindMaskPair : CheckKinds) { |
3438 | 70 | int Kind = CheckKindMaskPair.first; |
3439 | 70 | SanitizerMask Mask = CheckKindMaskPair.second; |
3440 | 70 | llvm::Value *Cond = |
3441 | 70 | Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind)); |
3442 | 70 | if (CGM.getLangOpts().Sanitize.has(Mask)) |
3443 | 17 | EmitCheck(std::make_pair(Cond, Mask), SanitizerHandler::CFICheckFail, {}, |
3444 | 17 | {Data, Addr, ValidVtable}); |
3445 | 53 | else |
3446 | 53 | EmitTrapCheck(Cond, SanitizerHandler::CFICheckFail); |
3447 | 70 | } |
3448 | | |
3449 | 14 | FinishFunction(); |
3450 | | // The only reference to this function will be created during LTO link. |
3451 | | // Make sure it survives until then. |
3452 | 14 | CGM.addUsedGlobal(F); |
3453 | 14 | } |
3454 | | |
3455 | 1.89k | void CodeGenFunction::EmitUnreachable(SourceLocation Loc) { |
3456 | 1.89k | if (SanOpts.has(SanitizerKind::Unreachable)) { |
3457 | 18 | SanitizerScope SanScope(this); |
3458 | 18 | EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()), |
3459 | 18 | SanitizerKind::Unreachable), |
3460 | 18 | SanitizerHandler::BuiltinUnreachable, |
3461 | 18 | EmitCheckSourceLocation(Loc), None); |
3462 | 18 | } |
3463 | 1.89k | Builder.CreateUnreachable(); |
3464 | 1.89k | } |
3465 | | |
3466 | | void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked, |
3467 | 499 | SanitizerHandler CheckHandlerID) { |
3468 | 499 | llvm::BasicBlock *Cont = createBasicBlock("cont"); |
3469 | | |
3470 | | // If we're optimizing, collapse all calls to trap down to just one per |
3471 | | // check-type per function to save on code size. |
3472 | 499 | if (TrapBBs.size() <= CheckHandlerID) |
3473 | 430 | TrapBBs.resize(CheckHandlerID + 1); |
3474 | 499 | llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID]; |
3475 | | |
3476 | 499 | if (!CGM.getCodeGenOpts().OptimizationLevel || !TrapBB43 ) { |
3477 | 471 | TrapBB = createBasicBlock("trap"); |
3478 | 471 | Builder.CreateCondBr(Checked, Cont, TrapBB); |
3479 | 471 | EmitBlock(TrapBB); |
3480 | | |
3481 | 471 | llvm::CallInst *TrapCall = |
3482 | 471 | Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::ubsantrap), |
3483 | 471 | llvm::ConstantInt::get(CGM.Int8Ty, CheckHandlerID)); |
3484 | | |
3485 | 471 | if (!CGM.getCodeGenOpts().TrapFuncName.empty()) { |
3486 | 1 | auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name", |
3487 | 1 | CGM.getCodeGenOpts().TrapFuncName); |
3488 | 1 | TrapCall->addAttribute(llvm::AttributeList::FunctionIndex, A); |
3489 | 1 | } |
3490 | 471 | TrapCall->setDoesNotReturn(); |
3491 | 471 | TrapCall->setDoesNotThrow(); |
3492 | 471 | Builder.CreateUnreachable(); |
3493 | 28 | } else { |
3494 | 28 | auto Call = TrapBB->begin(); |
3495 | 28 | assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB"); |
3496 | | |
3497 | 28 | Call->applyMergedLocation(Call->getDebugLoc(), |
3498 | 28 | Builder.getCurrentDebugLocation()); |
3499 | 28 | Builder.CreateCondBr(Checked, Cont, TrapBB); |
3500 | 28 | } |
3501 | | |
3502 | 499 | EmitBlock(Cont); |
3503 | 499 | } |
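 | |
 | | // Illustrative example: a failing trap check lowers to
 | |
 | | //   call void @llvm.ubsantrap(i8 <CheckHandlerID>)
 | |
 | | // marked noreturn/nounwind; when optimizing, later checks of the same
 | | // kind in this function branch to the same trap block instead of
 | | // emitting a new call.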
3504 | | |
3505 | 239 | llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) { |
3506 | 239 | llvm::CallInst *TrapCall = |
3507 | 239 | Builder.CreateCall(CGM.getIntrinsic(IntrID)); |
3508 | | |
3509 | 239 | if (!CGM.getCodeGenOpts().TrapFuncName.empty()) { |
3510 | 2 | auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name", |
3511 | 2 | CGM.getCodeGenOpts().TrapFuncName); |
3512 | 2 | TrapCall->addAttribute(llvm::AttributeList::FunctionIndex, A); |
3513 | 2 | } |
3514 | | |
3515 | 239 | return TrapCall; |
3516 | 239 | } |
3517 | | |
3518 | | Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E, |
3519 | | LValueBaseInfo *BaseInfo, |
3520 | 56.9k | TBAAAccessInfo *TBAAInfo) { |
3521 | 56.9k | assert(E->getType()->isArrayType() && |
3522 | 56.9k | "Array to pointer decay must have array source type!"); |
3523 | | |
3524 | | // Expressions of array type can't be bitfields or vector elements. |
3525 | 56.9k | LValue LV = EmitLValue(E); |
3526 | 56.9k | Address Addr = LV.getAddress(*this); |
3527 | | |
3528 | | // If the array type was an incomplete type, we need to make sure |
3529 | | // the decay ends up being the right type. |
3530 | 56.9k | llvm::Type *NewTy = ConvertType(E->getType()); |
3531 | 56.9k | Addr = Builder.CreateElementBitCast(Addr, NewTy); |
3532 | | |
3533 | | // Note that VLA pointers are always decayed, so we don't need to do |
3534 | | // anything here. |
3535 | 56.9k | if (!E->getType()->isVariableArrayType()) { |
3536 | 52.4k | assert(isa<llvm::ArrayType>(Addr.getElementType()) && |
3537 | 52.4k | "Expected pointer to array"); |
3538 | 52.4k | Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay"); |
3539 | 52.4k | } |
3540 | | |
3541 | | // The result of this decay conversion points to an array element within the |
3542 | | // base lvalue. However, since TBAA currently does not support representing |
3543 | | // accesses to elements of member arrays, we conservatively represent accesses |
3544 | | // to the pointee object as if it had no base lvalue specified.
3545 | | // TODO: Support TBAA for member arrays. |
3546 | 56.9k | QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType(); |
3547 | 56.9k | if (BaseInfo) *BaseInfo = LV.getBaseInfo()4.60k ; |
3548 | 56.9k | if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType)4.60k ; |
3549 | | |
3550 | 56.9k | return Builder.CreateElementBitCast(Addr, ConvertTypeForMem(EltType)); |
3551 | 56.9k | } |
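// Illustrative sketch of the two decay shapes handled above (declarations
// invented): a constant-size array decays via a "gep ..., 0, 0" named
// "arraydecay", while a VLA pointer is already decayed and only needs the
// element-type bitcast.
void decay(int n) {
  int fixed[8];
  int vla[n];
  int *p = fixed; // CreateConstArrayGEP(Addr, 0, "arraydecay")
  int *q = vla;   // no GEP: VLA pointers are always decayed
  (void)p; (void)q;
}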
3552 | | |
3553 | | /// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an |
3554 | | /// array to pointer, return the array subexpression. |
3555 | 47.0k | static const Expr *isSimpleArrayDecayOperand(const Expr *E) { |
3556 | | // If this isn't just an array->pointer decay, bail out. |
3557 | 47.0k | const auto *CE = dyn_cast<CastExpr>(E); |
3558 | 47.0k | if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay46.5k ) |
3559 | 25.9k | return nullptr; |
3560 | | |
3561 | | // If this is a decay from a variable-width array, bail out.
3562 | 21.0k | const Expr *SubExpr = CE->getSubExpr(); |
3563 | 21.0k | if (SubExpr->getType()->isVariableArrayType()) |
3564 | 2.74k | return nullptr; |
3565 | | |
3566 | 18.3k | return SubExpr; |
3567 | 18.3k | } |
3568 | | |
3569 | | static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF, |
3570 | | llvm::Value *ptr, |
3571 | | ArrayRef<llvm::Value*> indices, |
3572 | | bool inbounds, |
3573 | | bool signedIndices, |
3574 | | SourceLocation loc, |
3575 | 48.7k | const llvm::Twine &name = "arrayidx") { |
3576 | 48.7k | if (inbounds) { |
3577 | 48.7k | return CGF.EmitCheckedInBoundsGEP(ptr, indices, signedIndices, |
3578 | 48.7k | CodeGenFunction::NotSubtraction, loc, |
3579 | 48.7k | name); |
3580 | 5 | } else { |
3581 | 5 | return CGF.Builder.CreateGEP(ptr, indices, name); |
3582 | 5 | } |
3583 | 48.7k | } |
3584 | | |
3585 | | static CharUnits getArrayElementAlign(CharUnits arrayAlign, |
3586 | | llvm::Value *idx, |
3587 | 48.7k | CharUnits eltSize) { |
3588 | | // If we have a constant index, we can use the exact offset of the |
3589 | | // element we're accessing. |
3590 | 48.7k | if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) { |
3591 | 19.8k | CharUnits offset = constantIdx->getZExtValue() * eltSize; |
3592 | 19.8k | return arrayAlign.alignmentAtOffset(offset); |
3593 | | |
3594 | | // Otherwise, use the worst-case alignment for any element. |
3595 | 28.9k | } else { |
3596 | 28.9k | return arrayAlign.alignmentOfArrayElement(eltSize); |
3597 | 28.9k | } |
3598 | 48.7k | } |
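// Worked example for getArrayElementAlign (numbers invented): with a
// 16-byte-aligned array of 4-byte elements, the constant index 3 yields
// offset 12 and alignmentAtOffset(12) == 4, while index 4 yields offset 16
// and recovers the full 16-byte alignment; a non-constant index must use
// the worst case, alignmentOfArrayElement(4) == 4.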
3599 | | |
3600 | | static QualType getFixedSizeElementType(const ASTContext &ctx, |
3601 | 56 | const VariableArrayType *vla) { |
3602 | 56 | QualType eltType; |
3603 | 56 | do { |
3604 | 56 | eltType = vla->getElementType(); |
3605 | 56 | } while ((vla = ctx.getAsVariableArrayType(eltType))); |
3606 | 56 | return eltType; |
3607 | 56 | } |
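// Worked example (declarations invented): for "int a[n][m][4]" the loop
// above peels the variable dimensions n and m and returns the first
// non-VLA element type, int[4], which is what the GEP indices end up
// being scaled by.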
3608 | | |
3609 | | /// Given an array base, check whether its member access belongs to a record |
3610 | | /// with the preserve_access_index attribute.
3611 | 19.7k | static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) { |
3612 | 19.7k | if (!ArrayBase || !CGF.getDebugInfo()17.9k ) |
3613 | 14.4k | return false; |
3614 | | |
3615 | | // Only support base as either a MemberExpr or DeclRefExpr. |
3616 | | // DeclRefExpr to cover cases like: |
3617 | | // struct s { int a; int b[10]; }; |
3618 | | // struct s *p; |
3619 | | // p[1].a |
3620 | | // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr. |
3621 | | // p->b[5] is a MemberExpr example. |
3622 | 5.33k | const Expr *E = ArrayBase->IgnoreImpCasts(); |
3623 | 5.33k | if (const auto *ME = dyn_cast<MemberExpr>(E)) |
3624 | 188 | return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>(); |
3625 | | |
3626 | 5.14k | if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) { |
3627 | 4.93k | const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl()); |
3628 | 4.93k | if (!VarDef) |
3629 | 0 | return false; |
3630 | | |
3631 | 4.93k | const auto *PtrT = VarDef->getType()->getAs<PointerType>(); |
3632 | 4.93k | if (!PtrT) |
3633 | 3.14k | return false; |
3634 | | |
3635 | 1.78k | const auto *PointeeT = PtrT->getPointeeType() |
3636 | 1.78k | ->getUnqualifiedDesugaredType(); |
3637 | 1.78k | if (const auto *RecT = dyn_cast<RecordType>(PointeeT)) |
3638 | 20 | return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>(); |
3639 | 1.76k | return false; |
3640 | 1.76k | } |
3641 | | |
3642 | 211 | return false; |
3643 | 211 | } |
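// Illustrative BPF sketch (declarations invented; requires debug info,
// since the predicate bails out without CGF.getDebugInfo()): bases for
// which IsPreserveAIArrayBase returns true.
struct __attribute__((preserve_access_index)) S {
  int a;
  int b[10];
};
void use(struct S *p) {
  (void)p[1].a;  // DeclRefExpr base: p points to an attributed record
  (void)p->b[5]; // MemberExpr base: the field carries the attribute
}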
3644 | | |
3645 | | static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr, |
3646 | | ArrayRef<llvm::Value *> indices, |
3647 | | QualType eltType, bool inbounds, |
3648 | | bool signedIndices, SourceLocation loc, |
3649 | | QualType *arrayType = nullptr, |
3650 | | const Expr *Base = nullptr, |
3651 | 48.7k | const llvm::Twine &name = "arrayidx") { |
3652 | | // All the indices except the last must be zero.
3653 | 48.7k | #ifndef NDEBUG |
3654 | 48.7k | for (auto idx : indices.drop_back()) |
3655 | 48.7k | assert(isa<llvm::ConstantInt>(idx) && |
3656 | 48.7k | cast<llvm::ConstantInt>(idx)->isZero()); |
3657 | 48.7k | #endif |
3658 | | |
3659 | | // Determine the element size of the statically-sized base. This is |
3660 | | // the thing that the indices are expressed in terms of. |
3661 | 48.7k | if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) { |
3662 | 56 | eltType = getFixedSizeElementType(CGF.getContext(), vla); |
3663 | 56 | } |
3664 | | |
3665 | | // We can use that to compute the best alignment of the element. |
3666 | 48.7k | CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType); |
3667 | 48.7k | CharUnits eltAlign = |
3668 | 48.7k | getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize); |
3669 | | |
3670 | 48.7k | llvm::Value *eltPtr; |
3671 | 48.7k | auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back()); |
3672 | 48.7k | if (!LastIndex || |
3673 | 48.7k | (19.8k !CGF.IsInPreservedAIRegion19.8k && !IsPreserveAIArrayBase(CGF, Base)19.7k )) { |
3674 | 48.7k | eltPtr = emitArraySubscriptGEP( |
3675 | 48.7k | CGF, addr.getPointer(), indices, inbounds, signedIndices, |
3676 | 48.7k | loc, name); |
3677 | 20 | } else { |
3678 | | // Remember the original array subscript for the BPF target.
3679 | 20 | unsigned idx = LastIndex->getZExtValue(); |
3680 | 20 | llvm::DIType *DbgInfo = nullptr; |
3681 | 20 | if (arrayType) |
3682 | 20 | DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc); |
3683 | 20 | eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(addr.getElementType(), |
3684 | 20 | addr.getPointer(), |
3685 | 20 | indices.size() - 1, |
3686 | 20 | idx, DbgInfo); |
3687 | 20 | } |
3688 | | |
3689 | 48.7k | return Address(eltPtr, eltAlign); |
3690 | 48.7k | } |
3691 | | |
3692 | | LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E, |
3693 | 46.9k | bool Accessed) { |
3694 | | // The index must always be an integer, which is not an aggregate. Emit it |
3695 | | // in lexical order (this complexity is, sadly, required by C++17). |
3696 | 46.9k | llvm::Value *IdxPre = |
3697 | 46.9k | (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx())21 : nullptr; |
3698 | 46.9k | bool SignedIndices = false; |
3699 | 46.9k | auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * { |
3700 | 46.9k | auto *Idx = IdxPre; |
3701 | 46.9k | if (E->getLHS() != E->getIdx()) { |
3702 | 46.9k | assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS"); |
3703 | 46.9k | Idx = EmitScalarExpr(E->getIdx()); |
3704 | 46.9k | } |
3705 | | |
3706 | 46.9k | QualType IdxTy = E->getIdx()->getType(); |
3707 | 46.9k | bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType(); |
3708 | 46.9k | SignedIndices |= IdxSigned; |
3709 | | |
3710 | 46.9k | if (SanOpts.has(SanitizerKind::ArrayBounds)) |
3711 | 37 | EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed); |
3712 | | |
3713 | | // Extend or truncate the index type to 32 or 64 bits.
3714 | 46.9k | if (Promote && Idx->getType() != IntPtrTy46.4k ) |
3715 | 37.3k | Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom"); |
3716 | | |
3717 | 46.9k | return Idx; |
3718 | 46.9k | }; |
3719 | 46.9k | IdxPre = nullptr; |
3720 | | |
3721 | | // If the base is a vector type, then we are forming a vector element lvalue |
3722 | | // with this subscript. |
3723 | 46.9k | if (E->getBase()->getType()->isVectorType() && |
3724 | 443 | !isa<ExtVectorElementExpr>(E->getBase())) { |
3725 | | // Emit the vector as an lvalue to get its address. |
3726 | 442 | LValue LHS = EmitLValue(E->getBase()); |
3727 | 442 | auto *Idx = EmitIdxAfterBase(/*Promote*/false); |
3728 | 442 | assert(LHS.isSimple() && "Can only subscript lvalue vectors here!"); |
3729 | 442 | return LValue::MakeVectorElt(LHS.getAddress(*this), Idx, |
3730 | 442 | E->getBase()->getType(), LHS.getBaseInfo(), |
3731 | 442 | TBAAAccessInfo()); |
3732 | 442 | } |
3733 | | |
3734 | | // All the other cases basically behave like simple offsetting. |
3735 | | |
3736 | | // Handle the extvector case we ignored above. |
3737 | 46.4k | if (isa<ExtVectorElementExpr>(E->getBase())) { |
3738 | 1 | LValue LV = EmitLValue(E->getBase()); |
3739 | 1 | auto *Idx = EmitIdxAfterBase(/*Promote*/true); |
3740 | 1 | Address Addr = EmitExtVectorElementLValue(LV); |
3741 | | |
3742 | 1 | QualType EltType = LV.getType()->castAs<VectorType>()->getElementType(); |
3743 | 1 | Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true, |
3744 | 1 | SignedIndices, E->getExprLoc()); |
3745 | 1 | return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(), |
3746 | 1 | CGM.getTBAAInfoForSubobject(LV, EltType)); |
3747 | 1 | } |
3748 | | |
3749 | 46.4k | LValueBaseInfo EltBaseInfo; |
3750 | 46.4k | TBAAAccessInfo EltTBAAInfo; |
3751 | 46.4k | Address Addr = Address::invalid(); |
3752 | 46.4k | if (const VariableArrayType *vla = |
3753 | 1.56k | getContext().getAsVariableArrayType(E->getType())) { |
3754 | | // The base must be a pointer, which is not an aggregate. Emit |
3755 | | // it. It needs to be emitted first in case it's what captures |
3756 | | // the VLA bounds. |
3757 | 1.56k | Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo); |
3758 | 1.56k | auto *Idx = EmitIdxAfterBase(/*Promote*/true); |
3759 | | |
3760 | | // The element count here is the total number of non-VLA elements. |
3761 | 1.56k | llvm::Value *numElements = getVLASize(vla).NumElts; |
3762 | | |
3763 | | // Effectively, the multiply by the VLA size is part of the GEP. |
3764 | | // GEP indexes are signed, and scaling an index isn't permitted to |
3765 | | // signed-overflow, so we use the same semantics for our explicit |
3766 | | // multiply. We suppress this if overflow is not undefined behavior. |
3767 | 1.56k | if (getLangOpts().isSignedOverflowDefined()) { |
3768 | 0 | Idx = Builder.CreateMul(Idx, numElements); |
3769 | 1.56k | } else { |
3770 | 1.56k | Idx = Builder.CreateNSWMul(Idx, numElements); |
3771 | 1.56k | } |
3772 | | |
3773 | 1.56k | Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(), |
3774 | 1.56k | !getLangOpts().isSignedOverflowDefined(), |
3775 | 1.56k | SignedIndices, E->getExprLoc()); |
3776 | | |
3777 | 44.9k | } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){ |
3778 | | // Indexing over an interface, as in "NSString *P; P[4];" |
3779 | | |
3780 | | // Emit the base pointer. |
3781 | 5 | Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo); |
3782 | 5 | auto *Idx = EmitIdxAfterBase(/*Promote*/true); |
3783 | | |
3784 | 5 | CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT); |
3785 | 5 | llvm::Value *InterfaceSizeVal = |
3786 | 5 | llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity()); |
3787 | | |
3788 | 5 | llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal); |
3789 | | |
3790 | | // We don't necessarily build correct LLVM struct types for ObjC
3791 | | // interfaces, so we can't rely on GEP to do this scaling
3792 | | // correctly; instead we cast to i8*. FIXME: is this actually
3793 | | // true? A lot of other things in the fragile ABI would break...
3794 | 5 | llvm::Type *OrigBaseTy = Addr.getType(); |
3795 | 5 | Addr = Builder.CreateElementBitCast(Addr, Int8Ty); |
3796 | | |
3797 | | // Do the GEP. |
3798 | 5 | CharUnits EltAlign = |
3799 | 5 | getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize); |
3800 | 5 | llvm::Value *EltPtr = |
3801 | 5 | emitArraySubscriptGEP(*this, Addr.getPointer(), ScaledIdx, false, |
3802 | 5 | SignedIndices, E->getExprLoc()); |
3803 | 5 | Addr = Address(EltPtr, EltAlign); |
3804 | | |
3805 | | // Cast back. |
3806 | 5 | Addr = Builder.CreateBitCast(Addr, OrigBaseTy); |
3807 | 44.9k | } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) { |
3808 | | // If this is A[i] where A is an array, the frontend will have decayed the |
3809 | | // base to be an ArrayToPointerDecay implicit cast. While correct, it is
3810 | | // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a |
3811 | | // "gep x, i" here. Emit one "gep A, 0, i". |
3812 | 17.5k | assert(Array->getType()->isArrayType() && |
3813 | 17.5k | "Array to pointer decay must have array source type!"); |
3814 | 17.5k | LValue ArrayLV; |
3815 | | // For simple multidimensional array indexing, set the 'accessed' flag for |
3816 | | // better bounds-checking of the base expression. |
3817 | 17.5k | if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array)) |
3818 | 1.34k | ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true); |
3819 | 16.2k | else |
3820 | 16.2k | ArrayLV = EmitLValue(Array); |
3821 | 17.5k | auto *Idx = EmitIdxAfterBase(/*Promote*/true); |
3822 | | |
3823 | | // Propagate the alignment from the array itself to the result. |
3824 | 17.5k | QualType arrayType = Array->getType(); |
3825 | 17.5k | Addr = emitArraySubscriptGEP( |
3826 | 17.5k | *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx}, |
3827 | 17.5k | E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices, |
3828 | 17.5k | E->getExprLoc(), &arrayType, E->getBase()); |
3829 | 17.5k | EltBaseInfo = ArrayLV.getBaseInfo(); |
3830 | 17.5k | EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType()); |
3831 | 27.3k | } else { |
3832 | | // The base must be a pointer; emit it with an estimate of its alignment. |
3833 | 27.3k | Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo); |
3834 | 27.3k | auto *Idx = EmitIdxAfterBase(/*Promote*/true); |
3835 | 27.3k | QualType ptrType = E->getBase()->getType(); |
3836 | 27.3k | Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(), |
3837 | 27.3k | !getLangOpts().isSignedOverflowDefined(), |
3838 | 27.3k | SignedIndices, E->getExprLoc(), &ptrType, |
3839 | 27.3k | E->getBase()); |
3840 | 27.3k | } |
3841 | | |
3842 | 46.4k | LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo); |
3843 | | |
3844 | 46.4k | if (getLangOpts().ObjC && |
3845 | 18.4k | getLangOpts().getGC() != LangOptions::NonGC) { |
3846 | 127 | LV.setNonGC(!E->isOBJCGCCandidate(getContext())); |
3847 | 127 | setObjCGCLValueClass(getContext(), E, LV); |
3848 | 127 | } |
3849 | 46.4k | return LV; |
3850 | 46.4k | } |
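// Illustrative sketch of the subscript forms dispatched above (declarations
// invented):
typedef float v4 __attribute__((ext_vector_type(4)));
void subscripts(int n, v4 &v, int (*mat)[8], int *p) {
  int vla[n];
  (void)v[1];      // vector base: LValue::MakeVectorElt
  (void)vla[2];    // VLA: index pre-multiplied by the non-VLA element count
  (void)mat[1][2]; // simple array decay: one "gep A, 0, i"
  (void)p[3];      // plain pointer base: EmitPointerWithAlignment + GEP
}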
3851 | | |
3852 | 17 | LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) { |
3853 | 17 | assert( |
3854 | 17 | !E->isIncomplete() && |
3855 | 17 | "incomplete matrix subscript expressions should be rejected during Sema"); |
3856 | 17 | LValue Base = EmitLValue(E->getBase()); |
3857 | 17 | llvm::Value *RowIdx = EmitScalarExpr(E->getRowIdx()); |
3858 | 17 | llvm::Value *ColIdx = EmitScalarExpr(E->getColumnIdx()); |
3859 | 17 | llvm::Value *NumRows = Builder.getIntN( |
3860 | 17 | RowIdx->getType()->getScalarSizeInBits(), |
3861 | 17 | E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows()); |
3862 | 17 | llvm::Value *FinalIdx = |
3863 | 17 | Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx); |
3864 | 17 | return LValue::MakeMatrixElt( |
3865 | 17 | MaybeConvertMatrixAddress(Base.getAddress(*this), *this), FinalIdx, |
3866 | 17 | E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo()); |
3867 | 17 | } |
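// Illustrative sketch (requires -fenable-matrix; names invented): a matrix
// subscript computes the column-major element index built above,
// FinalIdx = ColIdx * NumRows + RowIdx.
typedef float m4x4 __attribute__((matrix_type(4, 4)));
float get(m4x4 &m, int r, int c) {
  return m[r][c]; // FinalIdx = c * 4 + r
}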
3868 | | |
3869 | | static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base, |
3870 | | LValueBaseInfo &BaseInfo, |
3871 | | TBAAAccessInfo &TBAAInfo, |
3872 | | QualType BaseTy, QualType ElTy, |
3873 | 1.47k | bool IsLowerBound) { |
3874 | 1.47k | LValue BaseLVal; |
3875 | 1.47k | if (auto *ASE = dyn_cast<OMPArraySectionExpr>(Base->IgnoreParenImpCasts())) { |
3876 | 364 | BaseLVal = CGF.EmitOMPArraySectionExpr(ASE, IsLowerBound); |
3877 | 364 | if (BaseTy->isArrayType()) { |
3878 | 124 | Address Addr = BaseLVal.getAddress(CGF); |
3879 | 124 | BaseInfo = BaseLVal.getBaseInfo(); |
3880 | | |
3881 | | // If the array type was an incomplete type, we need to make sure |
3882 | | // the decay ends up being the right type. |
3883 | 124 | llvm::Type *NewTy = CGF.ConvertType(BaseTy); |
3884 | 124 | Addr = CGF.Builder.CreateElementBitCast(Addr, NewTy); |
3885 | | |
3886 | | // Note that VLA pointers are always decayed, so we don't need to do |
3887 | | // anything here. |
3888 | 124 | if (!BaseTy->isVariableArrayType()) { |
3889 | 76 | assert(isa<llvm::ArrayType>(Addr.getElementType()) && |
3890 | 76 | "Expected pointer to array"); |
3891 | 76 | Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay"); |
3892 | 76 | } |
3893 | | |
3894 | 124 | return CGF.Builder.CreateElementBitCast(Addr, |
3895 | 124 | CGF.ConvertTypeForMem(ElTy)); |
3896 | 124 | } |
3897 | 240 | LValueBaseInfo TypeBaseInfo; |
3898 | 240 | TBAAAccessInfo TypeTBAAInfo; |
3899 | 240 | CharUnits Align = |
3900 | 240 | CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo); |
3901 | 240 | BaseInfo.mergeForCast(TypeBaseInfo); |
3902 | 240 | TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo); |
3903 | 240 | return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress(CGF)), Align); |
3904 | 240 | } |
3905 | 1.11k | return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo); |
3906 | 1.11k | } |
3907 | | |
3908 | | LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E, |
3909 | 2.24k | bool IsLowerBound) { |
3910 | 2.24k | QualType BaseTy = OMPArraySectionExpr::getBaseOriginalType(E->getBase()); |
3911 | 2.24k | QualType ResultExprTy; |
3912 | 2.24k | if (auto *AT = getContext().getAsArrayType(BaseTy)) |
3913 | 1.20k | ResultExprTy = AT->getElementType(); |
3914 | 1.03k | else |
3915 | 1.03k | ResultExprTy = BaseTy->getPointeeType(); |
3916 | 2.24k | llvm::Value *Idx = nullptr; |
3917 | 2.24k | if (IsLowerBound || E->getColonLocFirst().isInvalid()389 ) { |
3918 | | // Requesting the lower bound, or the upper bound when neither a length
3919 | | // nor a ':' for the default length was provided -> length = 1.
3920 | | // Idx = LowerBound ?: 0; |
3921 | 1.86k | if (auto *LowerBound = E->getLowerBound()) { |
3922 | 880 | Idx = Builder.CreateIntCast( |
3923 | 880 | EmitScalarExpr(LowerBound), IntPtrTy, |
3924 | 880 | LowerBound->getType()->hasSignedIntegerRepresentation()); |
3925 | 880 | } else |
3926 | 983 | Idx = llvm::ConstantInt::getNullValue(IntPtrTy); |
3927 | 379 | } else { |
3928 | | // Try to emit the length or lower bound as a constant. If this is
3929 | | // possible, 1 is subtracted from the constant length or lower bound.
3930 | | // Otherwise, emit LLVM IR computing (LB + Len) - 1.
3931 | 379 | auto &C = CGM.getContext(); |
3932 | 379 | auto *Length = E->getLength(); |
3933 | 379 | llvm::APSInt ConstLength; |
3934 | 379 | if (Length) { |
3935 | | // Idx = LowerBound + Length - 1; |
3936 | 357 | if (Optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) { |
3937 | 179 | ConstLength = CL->zextOrTrunc(PointerWidthInBits); |
3938 | 179 | Length = nullptr; |
3939 | 179 | } |
3940 | 357 | auto *LowerBound = E->getLowerBound(); |
3941 | 357 | llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false); |
3942 | 357 | if (LowerBound) { |
3943 | 230 | if (Optional<llvm::APSInt> LB = LowerBound->getIntegerConstantExpr(C)) { |
3944 | 230 | ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits); |
3945 | 230 | LowerBound = nullptr; |
3946 | 230 | } |
3947 | 230 | } |
3948 | 357 | if (!Length) |
3949 | 179 | --ConstLength; |
3950 | 178 | else if (!LowerBound) |
3951 | 178 | --ConstLowerBound; |
3952 | | |
3953 | 357 | if (Length || LowerBound179 ) { |
3954 | 178 | auto *LowerBoundVal = |
3955 | 178 | LowerBound |
3956 | 0 | ? Builder.CreateIntCast( |
3957 | 0 | EmitScalarExpr(LowerBound), IntPtrTy, |
3958 | 0 | LowerBound->getType()->hasSignedIntegerRepresentation()) |
3959 | 178 | : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound); |
3960 | 178 | auto *LengthVal = |
3961 | 178 | Length |
3962 | 178 | ? Builder.CreateIntCast( |
3963 | 178 | EmitScalarExpr(Length), IntPtrTy, |
3964 | 178 | Length->getType()->hasSignedIntegerRepresentation()) |
3965 | 0 | : llvm::ConstantInt::get(IntPtrTy, ConstLength); |
3966 | 178 | Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len", |
3967 | 178 | /*HasNUW=*/false, |
3968 | 178 | !getLangOpts().isSignedOverflowDefined()); |
3969 | 178 | if (Length && LowerBound) { |
3970 | 0 | Idx = Builder.CreateSub( |
3971 | 0 | Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1", |
3972 | 0 | /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined()); |
3973 | 0 | } |
3974 | 178 | } else |
3975 | 179 | Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound); |
3976 | 22 | } else { |
3977 | | // Idx = ArraySize - 1; |
3978 | 22 | QualType ArrayTy = BaseTy->isPointerType() |
3979 | 0 | ? E->getBase()->IgnoreParenImpCasts()->getType() |
3980 | 22 | : BaseTy; |
3981 | 22 | if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) { |
3982 | 16 | Length = VAT->getSizeExpr(); |
3983 | 16 | if (Optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) { |
3984 | 12 | ConstLength = *L; |
3985 | 12 | Length = nullptr; |
3986 | 12 | } |
3987 | 6 | } else { |
3988 | 6 | auto *CAT = C.getAsConstantArrayType(ArrayTy); |
3989 | 6 | ConstLength = CAT->getSize(); |
3990 | 6 | } |
3991 | 22 | if (Length) { |
3992 | 4 | auto *LengthVal = Builder.CreateIntCast( |
3993 | 4 | EmitScalarExpr(Length), IntPtrTy, |
3994 | 4 | Length->getType()->hasSignedIntegerRepresentation()); |
3995 | 4 | Idx = Builder.CreateSub( |
3996 | 4 | LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1", |
3997 | 4 | /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined()); |
3998 | 18 | } else { |
3999 | 18 | ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits); |
4000 | 18 | --ConstLength; |
4001 | 18 | Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength); |
4002 | 18 | } |
4003 | 22 | } |
4004 | 379 | } |
4005 | 2.24k | assert(Idx); |
4006 | | |
4007 | 2.24k | Address EltPtr = Address::invalid(); |
4008 | 2.24k | LValueBaseInfo BaseInfo; |
4009 | 2.24k | TBAAAccessInfo TBAAInfo; |
4010 | 2.24k | if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) { |
4011 | | // The base must be a pointer, which is not an aggregate. Emit |
4012 | | // it. It needs to be emitted first in case it's what captures |
4013 | | // the VLA bounds. |
4014 | 104 | Address Base = |
4015 | 104 | emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo, |
4016 | 104 | BaseTy, VLA->getElementType(), IsLowerBound); |
4017 | | // The element count here is the total number of non-VLA elements. |
4018 | 104 | llvm::Value *NumElements = getVLASize(VLA).NumElts; |
4019 | | |
4020 | | // Effectively, the multiply by the VLA size is part of the GEP. |
4021 | | // GEP indexes are signed, and scaling an index isn't permitted to |
4022 | | // signed-overflow, so we use the same semantics for our explicit |
4023 | | // multiply. We suppress this if overflow is not undefined behavior. |
4024 | 104 | if (getLangOpts().isSignedOverflowDefined()) |
4025 | 0 | Idx = Builder.CreateMul(Idx, NumElements); |
4026 | 104 | else |
4027 | 104 | Idx = Builder.CreateNSWMul(Idx, NumElements); |
4028 | 104 | EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(), |
4029 | 104 | !getLangOpts().isSignedOverflowDefined(), |
4030 | 104 | /*signedIndices=*/false, E->getExprLoc()); |
4031 | 2.13k | } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) { |
4032 | | // If this is A[i] where A is an array, the frontend will have decayed the |
4033 | | // base to be an ArrayToPointerDecay implicit cast. While correct, it is
4034 | | // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a |
4035 | | // "gep x, i" here. Emit one "gep A, 0, i". |
4036 | 764 | assert(Array->getType()->isArrayType() && |
4037 | 764 | "Array to pointer decay must have array source type!"); |
4038 | 764 | LValue ArrayLV; |
4039 | | // For simple multidimensional array indexing, set the 'accessed' flag for |
4040 | | // better bounds-checking of the base expression. |
4041 | 764 | if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array)) |
4042 | 96 | ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true); |
4043 | 668 | else |
4044 | 668 | ArrayLV = EmitLValue(Array); |
4045 | | |
4046 | | // Propagate the alignment from the array itself to the result. |
4047 | 764 | EltPtr = emitArraySubscriptGEP( |
4048 | 764 | *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx}, |
4049 | 764 | ResultExprTy, !getLangOpts().isSignedOverflowDefined(), |
4050 | 764 | /*signedIndices=*/false, E->getExprLoc()); |
4051 | 764 | BaseInfo = ArrayLV.getBaseInfo(); |
4052 | 764 | TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy); |
4053 | 1.37k | } else { |
4054 | 1.37k | Address Base = emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, |
4055 | 1.37k | TBAAInfo, BaseTy, ResultExprTy, |
4056 | 1.37k | IsLowerBound); |
4057 | 1.37k | EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy, |
4058 | 1.37k | !getLangOpts().isSignedOverflowDefined(), |
4059 | 1.37k | /*signedIndices=*/false, E->getExprLoc()); |
4060 | 1.37k | } |
4061 | | |
4062 | 2.24k | return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo); |
4063 | 2.24k | } |
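// Illustrative OpenMP sketch (declarations invented): array sections whose
// bound indices are computed above. For the lower bound, Idx is LB (or 0);
// for the upper bound with a length, Idx is (LB + Len) - 1.
void sections(int *p, int n) {
  int a[16];
#pragma omp target map(tofrom: a[2:4]) // LB = 2, Len = 4, upper Idx = 5
  {}
#pragma omp target map(tofrom: p[0:n]) // non-constant length: IR computes n - 1
  {}
}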
4064 | | |
4065 | | LValue CodeGenFunction:: |
4066 | 284 | EmitExtVectorElementExpr(const ExtVectorElementExpr *E) { |
4067 | | // Emit the base vector as an l-value. |
4068 | 284 | LValue Base; |
4069 | | |
4070 | | // ExtVectorElementExpr's base can either be a vector or pointer to vector. |
4071 | 284 | if (E->isArrow()) { |
4072 | | // If it is a pointer to a vector, emit the address and form an lvalue with |
4073 | | // it. |
4074 | 1 | LValueBaseInfo BaseInfo; |
4075 | 1 | TBAAAccessInfo TBAAInfo; |
4076 | 1 | Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo); |
4077 | 1 | const auto *PT = E->getBase()->getType()->castAs<PointerType>(); |
4078 | 1 | Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo); |
4079 | 1 | Base.getQuals().removeObjCGCAttr(); |
4080 | 283 | } else if (E->getBase()->isGLValue()) { |
4081 | | // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
4082 | | // emit the base as an lvalue. |
4083 | 275 | assert(E->getBase()->getType()->isVectorType()); |
4084 | 275 | Base = EmitLValue(E->getBase()); |
4085 | 8 | } else { |
4086 | | // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such. |
4087 | 8 | assert(E->getBase()->getType()->isVectorType() && |
4088 | 8 | "Result must be a vector"); |
4089 | 8 | llvm::Value *Vec = EmitScalarExpr(E->getBase()); |
4090 | | |
4091 | | // Store the vector to memory (because LValue wants an address). |
4092 | 8 | Address VecMem = CreateMemTemp(E->getBase()->getType()); |
4093 | 8 | Builder.CreateStore(Vec, VecMem); |
4094 | 8 | Base = MakeAddrLValue(VecMem, E->getBase()->getType(), |
4095 | 8 | AlignmentSource::Decl); |
4096 | 8 | } |
4097 | | |
4098 | 284 | QualType type = |
4099 | 284 | E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers()); |
4100 | | |
4101 | | // Encode the element access list into a vector of unsigned indices. |
4102 | 284 | SmallVector<uint32_t, 4> Indices; |
4103 | 284 | E->getEncodedElementAccess(Indices); |
4104 | | |
4105 | 284 | if (Base.isSimple()) { |
4106 | 272 | llvm::Constant *CV = |
4107 | 272 | llvm::ConstantDataVector::get(getLLVMContext(), Indices); |
4108 | 272 | return LValue::MakeExtVectorElt(Base.getAddress(*this), CV, type, |
4109 | 272 | Base.getBaseInfo(), TBAAAccessInfo()); |
4110 | 272 | } |
4111 | 12 | assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!"); |
4112 | | |
4113 | 12 | llvm::Constant *BaseElts = Base.getExtVectorElts(); |
4114 | 12 | SmallVector<llvm::Constant *, 4> CElts; |
4115 | | |
4116 | 24 | for (unsigned i = 0, e = Indices.size(); i != e; ++i12 ) |
4117 | 12 | CElts.push_back(BaseElts->getAggregateElement(Indices[i])); |
4118 | 12 | llvm::Constant *CV = llvm::ConstantVector::get(CElts); |
4119 | 12 | return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type, |
4120 | 12 | Base.getBaseInfo(), TBAAAccessInfo()); |
4121 | 12 | } |
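// Illustrative sketch (declarations invented): ext_vector element l-values.
typedef float f4 __attribute__((ext_vector_type(4)));
void swizzle(f4 &v) {
  v.xy = v.zw;  // simple base: MakeExtVectorElt with encoded indices {0, 1}
  v.wzyx.x = 1; // base is itself an ext-vector element: indices are folded
}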
4122 | | |
4123 | 129k | LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) { |
4124 | 129k | if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) { |
4125 | 28 | EmitIgnoredExpr(E->getBase()); |
4126 | 28 | return EmitDeclRefLValue(DRE); |
4127 | 28 | } |
4128 | | |
4129 | 129k | Expr *BaseExpr = E->getBase(); |
4130 | | // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar. |
4131 | 129k | LValue BaseLV; |
4132 | 129k | if (E->isArrow()) { |
4133 | 89.1k | LValueBaseInfo BaseInfo; |
4134 | 89.1k | TBAAAccessInfo TBAAInfo; |
4135 | 89.1k | Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo); |
4136 | 89.1k | QualType PtrTy = BaseExpr->getType()->getPointeeType(); |
4137 | 89.1k | SanitizerSet SkippedChecks; |
4138 | 89.1k | bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr); |
4139 | 89.1k | if (IsBaseCXXThis) |
4140 | 40.3k | SkippedChecks.set(SanitizerKind::Alignment, true); |
4141 | 89.1k | if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr)48.7k ) |
4142 | 40.3k | SkippedChecks.set(SanitizerKind::Null, true); |
4143 | 89.1k | EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr.getPointer(), PtrTy, |
4144 | 89.1k | /*Alignment=*/CharUnits::Zero(), SkippedChecks); |
4145 | 89.1k | BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo); |
4146 | 89.1k | } else |
4147 | 40.3k | BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess); |
4148 | | |
4149 | 129k | NamedDecl *ND = E->getMemberDecl(); |
4150 | 129k | if (auto *Field = dyn_cast<FieldDecl>(ND)) { |
4151 | 129k | LValue LV = EmitLValueForField(BaseLV, Field); |
4152 | 129k | setObjCGCLValueClass(getContext(), E, LV); |
4153 | 129k | if (getLangOpts().OpenMP) { |
4154 | | // If the member was explicitly marked as nontemporal, mark it as |
4155 | | // nontemporal. If the base lvalue is marked as nontemporal, mark accesses
4156 | | // to its members as nontemporal too.
4157 | 8.42k | if ((IsWrappedCXXThis(BaseExpr) && |
4158 | 4.52k | CGM.getOpenMPRuntime().isNontemporalDecl(Field)) || |
4159 | 8.36k | BaseLV.isNontemporal()) |
4160 | 64 | LV.setNontemporal(/*Value=*/true); |
4161 | 8.42k | } |
4162 | 129k | return LV; |
4163 | 129k | } |
4164 | | |
4165 | 0 | if (const auto *FD = dyn_cast<FunctionDecl>(ND)) |
4166 | 0 | return EmitFunctionDeclLValue(*this, E, FD); |
4167 | | |
4168 | 0 | llvm_unreachable("Unhandled member declaration!"); |
4169 | 0 | } |
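// Illustrative sketch (declarations invented): the two base shapes handled
// above.
struct Pt { int x; };
int load(Pt s, Pt *p) {
  return s.x    // dot: base emitted as an l-value via EmitCheckedLValue
       + p->x;  // arrow: base emitted as a pointer, with a member-access check
}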
4170 | | |
4171 | | /// Given that we are currently emitting a lambda, emit an l-value for |
4172 | | /// one of its members. |
4173 | 106 | LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) { |
4174 | 106 | assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent()->isLambda()); |
4175 | 106 | assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent() == Field->getParent()); |
4176 | 106 | QualType LambdaTagType = |
4177 | 106 | getContext().getTagDeclType(Field->getParent()); |
4178 | 106 | LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue, LambdaTagType); |
4179 | 106 | return EmitLValueForField(LambdaLV, Field); |
4180 | 106 | } |
4181 | | |
4182 | | /// Get the field index in the debug info. The debug info structure/union |
4183 | | /// ignores unnamed bitfields.
4184 | | unsigned CodeGenFunction::getDebugInfoFIndex(const RecordDecl *Rec, |
4185 | 57 | unsigned FieldIndex) { |
4186 | 57 | unsigned I = 0, Skipped = 0; |
4187 | | |
4188 | 84 | for (auto F : Rec->getDefinition()->fields()) { |
4189 | 84 | if (I == FieldIndex) |
4190 | 57 | break; |
4191 | 27 | if (F->isUnnamedBitfield()) |
4192 | 2 | Skipped++; |
4193 | 27 | I++; |
4194 | 27 | } |
4195 | | |
4196 | 57 | return FieldIndex - Skipped; |
4197 | 57 | } |
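// Worked example for the mapping above (struct invented): in
//   struct T { int a; int : 3; int b; };
// field b has AST FieldIndex 2, but the unnamed bitfield is absent from the
// debug info, so getDebugInfoFIndex returns 2 - 1 = 1.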
4198 | | |
4199 | | /// Get the address of a zero-sized field within a record. The resulting |
4200 | | /// address doesn't necessarily have the right type. |
4201 | | static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base, |
4202 | 22 | const FieldDecl *Field) { |
4203 | 22 | CharUnits Offset = CGF.getContext().toCharUnitsFromBits( |
4204 | 22 | CGF.getContext().getFieldOffset(Field)); |
4205 | 22 | if (Offset.isZero()) |
4206 | 20 | return Base; |
4207 | 2 | Base = CGF.Builder.CreateElementBitCast(Base, CGF.Int8Ty); |
4208 | 2 | return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset); |
4209 | 2 | } |
4210 | | |
4211 | | /// Drill down to the storage of a field without walking into |
4212 | | /// reference types. |
4213 | | /// |
4214 | | /// The resulting address doesn't necessarily have the right type. |
4215 | | static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base, |
4216 | 167k | const FieldDecl *field) { |
4217 | 167k | if (field->isZeroSize(CGF.getContext())) |
4218 | 22 | return emitAddrOfZeroSizeField(CGF, base, field); |
4219 | | |
4220 | 167k | const RecordDecl *rec = field->getParent(); |
4221 | | |
4222 | 167k | unsigned idx = |
4223 | 167k | CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field); |
4224 | | |
4225 | 167k | return CGF.Builder.CreateStructGEP(base, idx, field->getName()); |
4226 | 167k | } |
4227 | | |
4228 | | static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base, |
4229 | 35 | Address addr, const FieldDecl *field) { |
4230 | 35 | const RecordDecl *rec = field->getParent(); |
4231 | 35 | llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType( |
4232 | 35 | base.getType(), rec->getLocation()); |
4233 | | |
4234 | 35 | unsigned idx = |
4235 | 35 | CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field); |
4236 | | |
4237 | 35 | return CGF.Builder.CreatePreserveStructAccessIndex( |
4238 | 35 | addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo); |
4239 | 35 | } |
4240 | | |
4241 | 16 | static bool hasAnyVptr(const QualType Type, const ASTContext &Context) { |
4242 | 16 | const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl(); |
4243 | 16 | if (!RD) |
4244 | 3 | return false; |
4245 | | |
4246 | 13 | if (RD->isDynamicClass()) |
4247 | 6 | return true; |
4248 | | |
4249 | 7 | for (const auto &Base : RD->bases()) |
4250 | 2 | if (hasAnyVptr(Base.getType(), Context)) |
4251 | 2 | return true; |
4252 | | |
4253 | 5 | for (const FieldDecl *Field : RD->fields()) |
4254 | 4 | if (hasAnyVptr(Field->getType(), Context)) |
4255 | 3 | return true; |
4256 | | |
4257 | 2 | return false; |
4258 | 5 | } |
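// Illustrative sketch (types invented): inputs for which hasAnyVptr is true.
struct Dyn { virtual ~Dyn(); };   // dynamic class itself
struct ViaBase : Dyn { int x; };  // vptr found through a base
struct ViaField { Dyn d; };       // vptr found through a field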
4259 | | |
4260 | | LValue CodeGenFunction::EmitLValueForField(LValue base, |
4261 | 170k | const FieldDecl *field) { |
4262 | 170k | LValueBaseInfo BaseInfo = base.getBaseInfo(); |
4263 | | |
4264 | 170k | if (field->isBitField()) { |
4265 | 1.56k | const CGRecordLayout &RL = |
4266 | 1.56k | CGM.getTypes().getCGRecordLayout(field->getParent()); |
4267 | 1.56k | const CGBitFieldInfo &Info = RL.getBitFieldInfo(field); |
4268 | 1.56k | const bool UseVolatile = isAAPCS(CGM.getTarget()) && |
4269 | 481 | CGM.getCodeGenOpts().AAPCSBitfieldWidth && |
4270 | 277 | Info.VolatileStorageSize != 0 && |
4271 | 171 | field->getType() |
4272 | 171 | .withCVRQualifiers(base.getVRQualifiers()) |
4273 | 171 | .isVolatileQualified(); |
4274 | 1.56k | Address Addr = base.getAddress(*this); |
4275 | 1.56k | unsigned Idx = RL.getLLVMFieldNo(field); |
4276 | 1.56k | const RecordDecl *rec = field->getParent(); |
4277 | 1.56k | if (!UseVolatile) { |
4278 | 1.48k | if (!IsInPreservedAIRegion && |
4279 | 1.47k | (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()232 )) { |
4280 | 1.47k | if (Idx != 0) |
4281 | | // For structs, we GEP to the field that the record layout suggests. |
4282 | 478 | Addr = Builder.CreateStructGEP(Addr, Idx, field->getName()); |
4283 | 3 | } else { |
4284 | 3 | llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType( |
4285 | 3 | getContext().getRecordType(rec), rec->getLocation()); |
4286 | 3 | Addr = Builder.CreatePreserveStructAccessIndex( |
4287 | 3 | Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()), |
4288 | 3 | DbgInfo); |
4289 | 3 | } |
4290 | 1.48k | } |
4291 | 1.56k | const unsigned SS = |
4292 | 1.48k | UseVolatile ? Info.VolatileStorageSize78 : Info.StorageSize; |
4293 | | // Get the access type. |
4294 | 1.56k | llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS); |
4295 | 1.56k | if (Addr.getElementType() != FieldIntTy) |
4296 | 1.20k | Addr = Builder.CreateElementBitCast(Addr, FieldIntTy); |
4297 | 1.56k | if (UseVolatile) { |
4298 | 78 | const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity(); |
4299 | 78 | if (VolatileOffset) |
4300 | 28 | Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset); |
4301 | 78 | } |
4302 | | |
4303 | 1.56k | QualType fieldType = |
4304 | 1.56k | field->getType().withCVRQualifiers(base.getVRQualifiers()); |
4305 | | // TODO: Support TBAA for bit fields. |
4306 | 1.56k | LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource()); |
4307 | 1.56k | return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo, |
4308 | 1.56k | TBAAAccessInfo()); |
4309 | 1.56k | } |
4310 | | |
4311 | | // Fields of may-alias structures are may-alias themselves. |
4312 | | // FIXME: this should get propagated down through anonymous structs |
4313 | | // and unions. |
4314 | 168k | QualType FieldType = field->getType(); |
4315 | 168k | const RecordDecl *rec = field->getParent(); |
4316 | 168k | AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource(); |
4317 | 168k | LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource)); |
4318 | 168k | TBAAAccessInfo FieldTBAAInfo; |
4319 | 168k | if (base.getTBAAInfo().isMayAlias() || |
4320 | 167k | rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()167k ) { |
4321 | 1.09k | FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo(); |
4322 | 167k | } else if (rec->isUnion()) { |
4323 | | // TODO: Support TBAA for unions. |
4324 | 4.99k | FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo(); |
4325 | 162k | } else { |
4326 | | // If no base type been assigned for the base access, then try to generate |
4327 | | // one for this base lvalue. |
4328 | 162k | FieldTBAAInfo = base.getTBAAInfo(); |
4329 | 162k | if (!FieldTBAAInfo.BaseType) { |
4330 | 162k | FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType()); |
4331 | 162k | assert(!FieldTBAAInfo.Offset && |
4332 | 162k | "Nonzero offset for an access with no base type!"); |
4333 | 162k | } |
4334 | | |
4335 | | // Adjust offset to be relative to the base type. |
4336 | 162k | const ASTRecordLayout &Layout = |
4337 | 162k | getContext().getASTRecordLayout(field->getParent()); |
4338 | 162k | unsigned CharWidth = getContext().getCharWidth(); |
4339 | 162k | if (FieldTBAAInfo.BaseType) |
4340 | 2.38k | FieldTBAAInfo.Offset += |
4341 | 2.38k | Layout.getFieldOffset(field->getFieldIndex()) / CharWidth; |
4342 | | |
4343 | | // Update the final access type and size. |
4344 | 162k | FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType); |
4345 | 162k | FieldTBAAInfo.Size = |
4346 | 162k | getContext().getTypeSizeInChars(FieldType).getQuantity(); |
4347 | 162k | } |
4348 | | |
4349 | 168k | Address addr = base.getAddress(*this); |
4350 | 168k | if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) { |
4351 | 162k | if (CGM.getCodeGenOpts().StrictVTablePointers && |
4352 | 18 | ClassDef->isDynamicClass()) { |
4353 | | // Getting to any field of a dynamic object requires stripping dynamic
4354 | | // information provided by invariant.group, because accessing fields may
4355 | | // leak the real address of the dynamic object, which could result in
4356 | | // miscompilation when the leaked pointer is compared.
4357 | 5 | auto *stripped = Builder.CreateStripInvariantGroup(addr.getPointer()); |
4358 | 5 | addr = Address(stripped, addr.getAlignment()); |
4359 | 5 | } |
4360 | 162k | } |
4361 | | |
4362 | 168k | unsigned RecordCVR = base.getVRQualifiers(); |
4363 | 168k | if (rec->isUnion()) { |
4364 | | // For unions, there is no pointer adjustment. |
4365 | 5.20k | if (CGM.getCodeGenOpts().StrictVTablePointers && |
4366 | 10 | hasAnyVptr(FieldType, getContext())) |
4367 | | // Because unions can easily skip invariant.barriers, we need to add |
4368 | | // a barrier every time a CXXRecord field with a vptr is referenced.
4369 | 6 | addr = Address(Builder.CreateLaunderInvariantGroup(addr.getPointer()), |
4370 | 6 | addr.getAlignment()); |
4371 | | |
4372 | 5.20k | if (IsInPreservedAIRegion || |
4373 | 5.19k | (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>()3.47k )) { |
4374 | | // Remember the original union field index |
4375 | 19 | llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(base.getType(), |
4376 | 19 | rec->getLocation()); |
4377 | 19 | addr = Address( |
4378 | 19 | Builder.CreatePreserveUnionAccessIndex( |
4379 | 19 | addr.getPointer(), getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo), |
4380 | 19 | addr.getAlignment()); |
4381 | 19 | } |
4382 | | |
4383 | 5.20k | if (FieldType->isReferenceType()) |
4384 | 3 | addr = Builder.CreateElementBitCast( |
4385 | 3 | addr, CGM.getTypes().ConvertTypeForMem(FieldType), field->getName()); |
4386 | 163k | } else { |
4387 | 163k | if (!IsInPreservedAIRegion && |
4388 | 163k | (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()111k )) |
4389 | | // For structs, we GEP to the field that the record layout suggests. |
4390 | 163k | addr = emitAddrOfFieldStorage(*this, addr, field); |
4391 | 35 | else |
4392 | | // Remember the original struct field index |
4393 | 35 | addr = emitPreserveStructAccess(*this, base, addr, field); |
4394 | 163k | } |
4395 | | |
4396 | | // If this is a reference field, load the reference right now. |
4397 | 168k | if (FieldType->isReferenceType()) { |
4398 | 5.74k | LValue RefLVal = |
4399 | 5.74k | MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo); |
4400 | 5.74k | if (RecordCVR & Qualifiers::Volatile) |
4401 | 0 | RefLVal.getQuals().addVolatile(); |
4402 | 5.74k | addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo); |
4403 | | |
4404 | | // Qualifiers on the struct don't apply to the referencee. |
4405 | 5.74k | RecordCVR = 0; |
4406 | 5.74k | FieldType = FieldType->getPointeeType(); |
4407 | 5.74k | } |
4408 | | |
4409 | | // Make sure that the address is pointing to the right type. This is critical |
4410 | | // for both unions and structs. A union always needs a bitcast; a struct
4411 | | // element needs one if the laid-out LLVM type doesn't match the desired
4412 | | // type.
4413 | 168k | addr = Builder.CreateElementBitCast( |
4414 | 168k | addr, CGM.getTypes().ConvertTypeForMem(FieldType), field->getName()); |
4415 | | |
4416 | 168k | if (field->hasAttr<AnnotateAttr>()) |
4417 | 4 | addr = EmitFieldAnnotations(field, addr); |
4418 | | |
4419 | 168k | LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo); |
4420 | 168k | LV.getQuals().addCVRQualifiers(RecordCVR); |
4421 | | |
4422 | | // The __weak attribute on a field is ignored.
4423 | 168k | if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak) |
4424 | 0 | LV.getQuals().removeObjCGCAttr(); |
4425 | | |
4426 | 168k | return LV; |
4427 | 168k | } |
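// Illustrative sketch (declarations invented): a reference field is loaded
// eagerly by the code above, so the resulting l-value designates the
// referent rather than the field storage.
struct R { int &ref; };
int read(R r) { return r.ref; } // EmitLoadOfReference on the field address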
4428 | | |
4429 | | LValue |
4430 | | CodeGenFunction::EmitLValueForFieldInitialization(LValue Base, |
4431 | 22.9k | const FieldDecl *Field) { |
4432 | 22.9k | QualType FieldType = Field->getType(); |
4433 | | |
4434 | 22.9k | if (!FieldType->isReferenceType()) |
4435 | 18.9k | return EmitLValueForField(Base, Field); |
4436 | | |
4437 | 4.00k | Address V = emitAddrOfFieldStorage(*this, Base.getAddress(*this), Field); |
4438 | | |
4439 | | // Make sure that the address is pointing to the right type. |
4440 | 4.00k | llvm::Type *llvmType = ConvertTypeForMem(FieldType); |
4441 | 4.00k | V = Builder.CreateElementBitCast(V, llvmType, Field->getName()); |
4442 | | |
4443 | | // TODO: Generate TBAA information that describes this access as a structure |
4444 | | // member access and not just an access to an object of the field's type. This |
4445 | | // should be similar to what we do in EmitLValueForField(). |
4446 | 4.00k | LValueBaseInfo BaseInfo = Base.getBaseInfo(); |
4447 | 4.00k | AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource(); |
4448 | 4.00k | LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource)); |
4449 | 4.00k | return MakeAddrLValue(V, FieldType, FieldBaseInfo, |
4450 | 4.00k | CGM.getTBAAInfoForSubobject(Base, FieldType)); |
4451 | 4.00k | } |
4452 | | |
4453 | 2.00k | LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){ |
4454 | 2.00k | if (E->isFileScope()) { |
4455 | 3 | ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E); |
4456 | 3 | return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl); |
4457 | 3 | } |
4458 | 2.00k | if (E->getType()->isVariablyModifiedType()) |
4459 | | // make sure to emit the VLA size. |
4460 | 4 | EmitVariablyModifiedType(E->getType()); |
4461 | | |
4462 | 2.00k | Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral"); |
4463 | 2.00k | const Expr *InitExpr = E->getInitializer(); |
4464 | 2.00k | LValue Result = MakeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl); |
4465 | | |
4466 | 2.00k | EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(), |
4467 | 2.00k | /*Init*/ true); |
4468 | | |
4469 | | // Block-scope compound literals are destroyed at the end of the enclosing |
4470 | | // scope in C. |
4471 | 2.00k | if (!getLangOpts().CPlusPlus) |
4472 | 1.91k | if (QualType::DestructionKind DtorKind = E->getType().isDestructedType()) |
4473 | 12 | pushLifetimeExtendedDestroy(getCleanupKind(DtorKind), DeclPtr, |
4474 | 12 | E->getType(), getDestroyer(DtorKind), |
4475 | 12 | DtorKind & EHCleanup); |
4476 | | |
4477 | 2.00k | return Result; |
4478 | 2.00k | } |
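// Illustrative C sketch (function invented): a block-scope compound literal
// emitted through the path above into a ".compoundliteral" temporary; in C,
// any needed cleanup is deferred to the end of the enclosing scope.
int sum3(void) {
  int *p = (int[]){1, 2, 3};
  return p[0] + p[1] + p[2];
}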
4479 | | |
4480 | 6 | LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) { |
4481 | 6 | if (!E->isGLValue()) |
4482 | | // Initializing an aggregate temporary in C++11: T{...}. |
4483 | 0 | return EmitAggExprToLValue(E); |
4484 | | |
4485 | | // An lvalue initializer list must be initializing a reference. |
4486 | 6 | assert(E->isTransparent() && "non-transparent glvalue init list"); |
4487 | 6 | return EmitLValue(E->getInit(0)); |
4488 | 6 | } |
4489 | | |
4490 | | /// Emit the operand of a glvalue conditional operator. This is either a glvalue |
4491 | | /// or a (possibly-parenthesized) throw-expression. If this is a throw, no |
4492 | | /// LValue is returned and the current block has been terminated. |
4493 | | static Optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF, |
4494 | 984 | const Expr *Operand) { |
4495 | 984 | if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) { |
4496 | 2 | CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false); |
4497 | 2 | return None; |
4498 | 2 | } |
4499 | | |
4500 | 982 | return CGF.EmitLValue(Operand); |
4501 | 982 | } |
4502 | | |
4503 | | LValue CodeGenFunction:: |
4504 | 505 | EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) { |
4505 | 505 | if (!expr->isGLValue()) { |
4506 | | // ?: here should be an aggregate. |
4507 | 9 | assert(hasAggregateEvaluationKind(expr->getType()) && |
4508 | 9 | "Unexpected conditional operator!"); |
4509 | 9 | return EmitAggExprToLValue(expr); |
4510 | 9 | } |
4511 | | |
4512 | 496 | OpaqueValueMapping binding(*this, expr); |
4513 | | |
4514 | 496 | const Expr *condExpr = expr->getCond(); |
4515 | 496 | bool CondExprBool; |
4516 | 496 | if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) { |
4517 | 4 | const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr(); |
4518 | 4 | if (!CondExprBool) std::swap(live, dead)1 ; |
4519 | | |
4520 | 4 | if (!ContainsLabel(dead)) { |
4521 | | // If the true case is live, we need to track its region. |
4522 | 4 | if (CondExprBool) |
4523 | 3 | incrementProfileCounter(expr); |
4524 | | // If it is a throw expression, emit it and return an undefined lvalue
4525 | | // because the result can't be used.
4526 | 4 | if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(live->IgnoreParens())) { |
4527 | 1 | EmitCXXThrowExpr(ThrowExpr); |
4528 | 1 | llvm::Type *Ty = |
4529 | 1 | llvm::PointerType::getUnqual(ConvertType(dead->getType())); |
4530 | 1 | return MakeAddrLValue( |
4531 | 1 | Address(llvm::UndefValue::get(Ty), CharUnits::One()), |
4532 | 1 | dead->getType()); |
4533 | 1 | } |
4534 | 3 | return EmitLValue(live); |
4535 | 3 | } |
4536 | 4 | } |
4537 | | |
4538 | 492 | llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true"); |
4539 | 492 | llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false"); |
4540 | 492 | llvm::BasicBlock *contBlock = createBasicBlock("cond.end"); |
4541 | | |
4542 | 492 | ConditionalEvaluation eval(*this); |
4543 | 492 | EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock, getProfileCount(expr)); |
4544 | | |
4545 | | // Any temporaries created here are conditional. |
4546 | 492 | EmitBlock(lhsBlock); |
4547 | 492 | incrementProfileCounter(expr); |
4548 | 492 | eval.begin(*this); |
4549 | 492 | Optional<LValue> lhs = |
4550 | 492 | EmitLValueOrThrowExpression(*this, expr->getTrueExpr()); |
4551 | 492 | eval.end(*this); |
4552 | | |
4553 | 492 | if (lhs && !lhs->isSimple()491 ) |
4554 | 0 | return EmitUnsupportedLValue(expr, "conditional operator"); |
4555 | | |
4556 | 492 | lhsBlock = Builder.GetInsertBlock(); |
4557 | 492 | if (lhs) |
4558 | 491 | Builder.CreateBr(contBlock); |
4559 | | |
4560 | | // Any temporaries created here are conditional. |
4561 | 492 | EmitBlock(rhsBlock); |
4562 | 492 | eval.begin(*this); |
4563 | 492 | Optional<LValue> rhs = |
4564 | 492 | EmitLValueOrThrowExpression(*this, expr->getFalseExpr()); |
4565 | 492 | eval.end(*this); |
4566 | 492 | if (rhs && !rhs->isSimple()491 ) |
4567 | 0 | return EmitUnsupportedLValue(expr, "conditional operator"); |
4568 | 492 | rhsBlock = Builder.GetInsertBlock(); |
4569 | | |
4570 | 492 | EmitBlock(contBlock); |
4571 | | |
4572 | 492 | if (lhs && rhs491 ) { |
4573 | 490 | llvm::PHINode *phi = |
4574 | 490 | Builder.CreatePHI(lhs->getPointer(*this)->getType(), 2, "cond-lvalue"); |
4575 | 490 | phi->addIncoming(lhs->getPointer(*this), lhsBlock); |
4576 | 490 | phi->addIncoming(rhs->getPointer(*this), rhsBlock); |
4577 | 490 | Address result(phi, std::min(lhs->getAlignment(), rhs->getAlignment())); |
4578 | 490 | AlignmentSource alignSource = |
4579 | 490 | std::max(lhs->getBaseInfo().getAlignmentSource(), |
4580 | 490 | rhs->getBaseInfo().getAlignmentSource()); |
4581 | 490 | TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator( |
4582 | 490 | lhs->getTBAAInfo(), rhs->getTBAAInfo()); |
4583 | 490 | return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource), |
4584 | 490 | TBAAInfo); |
4585 | 2 | } else { |
4586 | 2 | assert((lhs || rhs) && |
4587 | 2 | "both operands of glvalue conditional are throw-expressions?"); |
4588 | 1 | return lhs ? *lhs : *rhs; |
4589 | 2 | } |
4590 | 492 | } |
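// Illustrative sketch (declarations invented): a glvalue conditional whose
// arms are joined by the "cond-lvalue" PHI built above; a throw in one arm
// terminates that block and the other arm's value is used directly.
int &choose(bool c, int &a, int &b) {
  return c ? a : b; // phi [ &a, cond.true ], [ &b, cond.false ]
}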
4591 | | |
4592 | | /// EmitCastLValue - Casts are never lvalues unless that cast is to a reference |
4593 | | /// type. If the cast is to a reference, we can have the usual lvalue result;
4594 | | /// otherwise, if a cast is needed by the code generator in an lvalue context,
4595 | | /// it must mean that we need the address of an aggregate in order to access
4596 | | /// one of its members. This can happen for all the reasons that casts are
4597 | | /// permitted with aggregate results, including noop aggregate casts and
4598 | | /// casts from scalar to union.
4599 | 39.3k | LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { |
4600 | 39.3k | switch (E->getCastKind()) { |
4601 | 0 | case CK_ToVoid: |
4602 | 0 | case CK_BitCast: |
4603 | 0 | case CK_LValueToRValueBitCast: |
4604 | 0 | case CK_ArrayToPointerDecay: |
4605 | 0 | case CK_FunctionToPointerDecay: |
4606 | 0 | case CK_NullToMemberPointer: |
4607 | 0 | case CK_NullToPointer: |
4608 | 0 | case CK_IntegralToPointer: |
4609 | 0 | case CK_PointerToIntegral: |
4610 | 0 | case CK_PointerToBoolean: |
4611 | 0 | case CK_VectorSplat: |
4612 | 0 | case CK_IntegralCast: |
4613 | 0 | case CK_BooleanToSignedIntegral: |
4614 | 0 | case CK_IntegralToBoolean: |
4615 | 0 | case CK_IntegralToFloating: |
4616 | 0 | case CK_FloatingToIntegral: |
4617 | 0 | case CK_FloatingToBoolean: |
4618 | 0 | case CK_FloatingCast: |
4619 | 0 | case CK_FloatingRealToComplex: |
4620 | 0 | case CK_FloatingComplexToReal: |
4621 | 0 | case CK_FloatingComplexToBoolean: |
4622 | 0 | case CK_FloatingComplexCast: |
4623 | 0 | case CK_FloatingComplexToIntegralComplex: |
4624 | 0 | case CK_IntegralRealToComplex: |
4625 | 0 | case CK_IntegralComplexToReal: |
4626 | 0 | case CK_IntegralComplexToBoolean: |
4627 | 0 | case CK_IntegralComplexCast: |
4628 | 0 | case CK_IntegralComplexToFloatingComplex: |
4629 | 0 | case CK_DerivedToBaseMemberPointer: |
4630 | 0 | case CK_BaseToDerivedMemberPointer: |
4631 | 0 | case CK_MemberPointerToBoolean: |
4632 | 0 | case CK_ReinterpretMemberPointer: |
4633 | 0 | case CK_AnyPointerToBlockPointerCast: |
4634 | 0 | case CK_ARCProduceObject: |
4635 | 0 | case CK_ARCConsumeObject: |
4636 | 0 | case CK_ARCReclaimReturnedObject: |
4637 | 0 | case CK_ARCExtendBlockObject: |
4638 | 0 | case CK_CopyAndAutoreleaseBlockObject: |
4639 | 0 | case CK_IntToOCLSampler: |
4640 | 0 | case CK_FloatingToFixedPoint: |
4641 | 0 | case CK_FixedPointToFloating: |
4642 | 0 | case CK_FixedPointCast: |
4643 | 0 | case CK_FixedPointToBoolean: |
4644 | 0 | case CK_FixedPointToIntegral: |
4645 | 0 | case CK_IntegralToFixedPoint: |
4646 | 0 | return EmitUnsupportedLValue(E, "unexpected cast lvalue"); |
4647 | |
4648 | 0 | case CK_Dependent: |
4649 | 0 | llvm_unreachable("dependent cast kind in IR gen!"); |
4650 | |
4651 | 0 | case CK_BuiltinFnToFnPtr: |
4652 | 0 | llvm_unreachable("builtin functions are handled elsewhere"); |
4653 | | |
4654 | | // These are never l-values; just use the aggregate emission code. |
4655 | 0 | case CK_NonAtomicToAtomic: |
4656 | 1 | case CK_AtomicToNonAtomic: |
4657 | 1 | return EmitAggExprToLValue(E); |
4658 | |
4659 | 13 | case CK_Dynamic: { |
4660 | 13 | LValue LV = EmitLValue(E->getSubExpr()); |
4661 | 13 | Address V = LV.getAddress(*this); |
4662 | 13 | const auto *DCE = cast<CXXDynamicCastExpr>(E); |
4663 | 13 | return MakeNaturalAlignAddrLValue(EmitDynamicCast(V, DCE), E->getType()); |
4664 | 0 | } |
4665 | |
4666 | 0 | case CK_ConstructorConversion: |
4667 | 13 | case CK_UserDefinedConversion: |
4668 | 13 | case CK_CPointerToObjCPointerCast: |
4669 | 13 | case CK_BlockPointerToObjCPointerCast: |
4670 | 29.4k | case CK_NoOp: |
4671 | 29.5k | case CK_LValueToRValue: |
4672 | 29.5k | return EmitLValue(E->getSubExpr()); |
4673 | | |
4674 | 2.66k | case CK_UncheckedDerivedToBase: |
4675 | 9.64k | case CK_DerivedToBase: { |
4676 | 9.64k | const auto *DerivedClassTy = |
4677 | 9.64k | E->getSubExpr()->getType()->castAs<RecordType>(); |
4678 | 9.64k | auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl()); |
4679 | | |
4680 | 9.64k | LValue LV = EmitLValue(E->getSubExpr()); |
4681 | 9.64k | Address This = LV.getAddress(*this); |
4682 | | |
 4683 | |   // Perform the derived-to-base conversion.
4684 | 9.64k | Address Base = GetAddressOfBaseClass( |
4685 | 9.64k | This, DerivedClassDecl, E->path_begin(), E->path_end(), |
4686 | 9.64k | /*NullCheckValue=*/false, E->getExprLoc()); |
4687 | | |
4688 | | // TODO: Support accesses to members of base classes in TBAA. For now, we |
4689 | | // conservatively pretend that the complete object is of the base class |
4690 | | // type. |
4691 | 9.64k | return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(), |
4692 | 9.64k | CGM.getTBAAInfoForSubobject(LV, E->getType())); |
4693 | 2.66k | } |
4694 | 3 | case CK_ToUnion: |
4695 | 3 | return EmitAggExprToLValue(E); |
4696 | 81 | case CK_BaseToDerived: { |
4697 | 81 | const auto *DerivedClassTy = E->getType()->castAs<RecordType>(); |
4698 | 81 | auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl()); |
4699 | | |
4700 | 81 | LValue LV = EmitLValue(E->getSubExpr()); |
4701 | | |
 4702 | |   // Perform the base-to-derived conversion.
4703 | 81 | Address Derived = GetAddressOfDerivedClass( |
4704 | 81 | LV.getAddress(*this), DerivedClassDecl, E->path_begin(), E->path_end(), |
4705 | 81 | /*NullCheckValue=*/false); |
4706 | | |
4707 | | // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is |
4708 | | // performed and the object is not of the derived type. |
4709 | 81 | if (sanitizePerformTypeCheck()) |
4710 | 7 | EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(), |
4711 | 7 | Derived.getPointer(), E->getType()); |
4712 | | |
4713 | 81 | if (SanOpts.has(SanitizerKind::CFIDerivedCast)) |
4714 | 3 | EmitVTablePtrCheckForCast(E->getType(), Derived.getPointer(), |
4715 | 3 | /*MayBeNull=*/false, CFITCK_DerivedCast, |
4716 | 3 | E->getBeginLoc()); |
4717 | | |
4718 | 81 | return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(), |
4719 | 81 | CGM.getTBAAInfoForSubobject(LV, E->getType())); |
4720 | 2.66k | } |
4721 | 23 | case CK_LValueBitCast: { |
4722 | | // This must be a reinterpret_cast (or c-style equivalent). |
4723 | 23 | const auto *CE = cast<ExplicitCastExpr>(E); |
4724 | | |
4725 | 23 | CGM.EmitExplicitCastExprType(CE, this); |
4726 | 23 | LValue LV = EmitLValue(E->getSubExpr()); |
4727 | 23 | Address V = Builder.CreateBitCast(LV.getAddress(*this), |
4728 | 23 | ConvertType(CE->getTypeAsWritten())); |
4729 | | |
4730 | 23 | if (SanOpts.has(SanitizerKind::CFIUnrelatedCast)) |
4731 | 4 | EmitVTablePtrCheckForCast(E->getType(), V.getPointer(), |
4732 | 4 | /*MayBeNull=*/false, CFITCK_UnrelatedCast, |
4733 | 4 | E->getBeginLoc()); |
4734 | | |
4735 | 23 | return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(), |
4736 | 23 | CGM.getTBAAInfoForSubobject(LV, E->getType())); |
4737 | 2.66k | } |
4738 | 66 | case CK_AddressSpaceConversion: { |
4739 | 66 | LValue LV = EmitLValue(E->getSubExpr()); |
4740 | 66 | QualType DestTy = getContext().getPointerType(E->getType()); |
4741 | 66 | llvm::Value *V = getTargetHooks().performAddrSpaceCast( |
4742 | 66 | *this, LV.getPointer(*this), |
4743 | 66 | E->getSubExpr()->getType().getAddressSpace(), |
4744 | 66 | E->getType().getAddressSpace(), ConvertType(DestTy)); |
4745 | 66 | return MakeAddrLValue(Address(V, LV.getAddress(*this).getAlignment()), |
4746 | 66 | E->getType(), LV.getBaseInfo(), LV.getTBAAInfo()); |
4747 | 2.66k | } |
4748 | 5 | case CK_ObjCObjectLValueCast: { |
4749 | 5 | LValue LV = EmitLValue(E->getSubExpr()); |
4750 | 5 | Address V = Builder.CreateElementBitCast(LV.getAddress(*this), |
4751 | 5 | ConvertType(E->getType())); |
4752 | 5 | return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(), |
4753 | 5 | CGM.getTBAAInfoForSubobject(LV, E->getType())); |
4754 | 2.66k | } |
4755 | 0 | case CK_ZeroToOCLOpaqueType: |
4756 | 0 | llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid"); |
4757 | 0 | } |
4758 | | |
4759 | 0 | llvm_unreachable("Unhandled lvalue cast kind?"); |
4760 | 0 | } |
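
// --- Editorial sketch, not part of CGExpr.cpp; all names are hypothetical. ---
// A minimal C++ input exercising two of the lvalue cast kinds handled above:
// binding 'd' to a SketchBase& is a CK_DerivedToBase lvalue cast, and the
// static_cast back down is CK_BaseToDerived, where the sanitizer/CFI checks
// shown above would be emitted when those options are enabled.
struct SketchBase { int x; };
struct SketchDerived : SketchBase { int y; };

int sketchDowncast(SketchDerived &d) {
  SketchBase &b = d;                        // CK_DerivedToBase lvalue cast
  return static_cast<SketchDerived &>(b).y; // CK_BaseToDerived lvalue cast
}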
4761 | | |
4762 | 588 | LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) { |
4763 | 588 | assert(OpaqueValueMappingData::shouldBindAsLValue(e)); |
4764 | 588 | return getOrCreateOpaqueLValueMapping(e); |
4765 | 588 | } |
4766 | | |
4767 | | LValue |
4768 | 845 | CodeGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) { |
4769 | 845 | assert(OpaqueValueMapping::shouldBindAsLValue(e)); |
4770 | | |
4771 | 845 | llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator |
4772 | 845 | it = OpaqueLValues.find(e); |
4773 | | |
4774 | 845 | if (it != OpaqueLValues.end()) |
4775 | 739 | return it->second; |
4776 | | |
4777 | 106 | assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted"); |
4778 | 106 | return EmitLValue(e->getSourceExpr()); |
4779 | 106 | } |
4780 | | |
4781 | | RValue |
4782 | 2.20k | CodeGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) { |
4783 | 2.20k | assert(!OpaqueValueMapping::shouldBindAsLValue(e)); |
4784 | | |
4785 | 2.20k | llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator |
4786 | 2.20k | it = OpaqueRValues.find(e); |
4787 | | |
4788 | 2.20k | if (it != OpaqueRValues.end()) |
4789 | 1.64k | return it->second; |
4790 | | |
4791 | 558 | assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted"); |
4792 | 558 | return EmitAnyExpr(e->getSourceExpr()); |
4793 | 558 | } |
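
// --- Editorial sketch; names are hypothetical. ---
// OpaqueValueExprs typically come from constructs that evaluate a
// subexpression once and reuse it. In the GNU binary conditional below, f()
// is bound to an opaque value, so the mapping helpers above can hand back
// the single emitted result for both of its uses.
int sketchFallback(int (*f)(), int dflt) {
  return f() ?: dflt; // f() evaluated once, reused as condition and result
}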
4794 | | |
4795 | | RValue CodeGenFunction::EmitRValueForField(LValue LV, |
4796 | | const FieldDecl *FD, |
4797 | 44 | SourceLocation Loc) { |
4798 | 44 | QualType FT = FD->getType(); |
4799 | 44 | LValue FieldLV = EmitLValueForField(LV, FD); |
4800 | 44 | switch (getEvaluationKind(FT)) { |
4801 | 1 | case TEK_Complex: |
4802 | 1 | return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc)); |
4803 | 0 | case TEK_Aggregate: |
4804 | 0 | return FieldLV.asAggregateRValue(*this); |
4805 | 43 | case TEK_Scalar: |
4806 | | // This routine is used to load fields one-by-one to perform a copy, so |
4807 | | // don't load reference fields. |
4808 | 43 | if (FD->getType()->isReferenceType()) |
4809 | 1 | return RValue::get(FieldLV.getPointer(*this)); |
 4810 | |   // Bit-fields go through EmitLoadOfLValue; any other scalar field can
 4811 | |   // take a primitive load via EmitLoadOfScalar.
4812 | 42 | if (FieldLV.isBitField()) |
4813 | 0 | return EmitLoadOfLValue(FieldLV, Loc); |
4814 | 42 | return RValue::get(EmitLoadOfScalar(FieldLV, Loc)); |
4815 | 0 | } |
4816 | 0 | llvm_unreachable("bad evaluation kind"); |
4817 | 0 | } |
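
// --- Editorial sketch, hypothetical types. ---
// Field kinds dispatched by EmitRValueForField during a member-wise copy:
struct SketchMixed {
  _Complex double c; // TEK_Complex: EmitLoadOfComplex
  int &r;            // reference: the pointer is copied, not loaded through
  int n;             // TEK_Scalar: a primitive EmitLoadOfScalar
};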
4818 | | |
4819 | | //===--------------------------------------------------------------------===// |
4820 | | // Expression Emission |
4821 | | //===--------------------------------------------------------------------===// |
4822 | | |
4823 | | RValue CodeGenFunction::EmitCallExpr(const CallExpr *E, |
4824 | 294k | ReturnValueSlot ReturnValue) { |
 4825 | |  // Builtins never have block type, so a callee of block-pointer type cannot be a builtin.
4826 | 294k | if (E->getCallee()->getType()->isBlockPointerType()) |
4827 | 570 | return EmitBlockCallExpr(E, ReturnValue); |
4828 | | |
4829 | 293k | if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E)) |
4830 | 60.0k | return EmitCXXMemberCallExpr(CE, ReturnValue); |
4831 | | |
4832 | 233k | if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E)) |
4833 | 32 | return EmitCUDAKernelCallExpr(CE, ReturnValue); |
4834 | | |
4835 | 233k | if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E)) |
4836 | 12.1k | if (const CXXMethodDecl *MD = |
4837 | 10.4k | dyn_cast_or_null<CXXMethodDecl>(CE->getCalleeDecl())) |
4838 | 10.4k | return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue); |
4839 | | |
4840 | 223k | CGCallee callee = EmitCallee(E->getCallee()); |
4841 | | |
4842 | 223k | if (callee.isBuiltin()) { |
4843 | 62.7k | return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(), |
4844 | 62.7k | E, ReturnValue); |
4845 | 62.7k | } |
4846 | | |
4847 | 160k | if (callee.isPseudoDestructor()) { |
4848 | 183 | return EmitCXXPseudoDestructorExpr(callee.getPseudoDestructorExpr()); |
4849 | 183 | } |
4850 | | |
4851 | 160k | return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue); |
4852 | 160k | } |
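
// --- Editorial sketch, hypothetical declarations. ---
// Call forms in the order EmitCallExpr tests for them:
struct SketchS {
  int m() { return 1; }
  int operator+(int v) { return v + 2; }
};
int sketchPlain(int v) { return v; }

int sketchCalls(SketchS &s) {
  int a = s.m();             // CXXMemberCallExpr -> EmitCXXMemberCallExpr
  int b = s + 1;             // CXXOperatorCallExpr on a CXXMethodDecl
  int c = __builtin_abs(-3); // callee resolves to a builtin CGCallee
  return a + b + c + sketchPlain(0); // ordinary direct call through EmitCall
}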
4853 | | |
 4854 | | /// Emit a CallExpr without considering whether it might be a subclass of CallExpr.
4855 | | RValue CodeGenFunction::EmitSimpleCallExpr(const CallExpr *E, |
4856 | 32 | ReturnValueSlot ReturnValue) { |
4857 | 32 | CGCallee Callee = EmitCallee(E->getCallee()); |
4858 | 32 | return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue); |
4859 | 32 | } |
4860 | | |
4861 | 218k | static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD) { |
4862 | 218k | const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl()); |
4863 | | |
4864 | 218k | if (auto builtinID = FD->getBuiltinID()) { |
 4865 | |     // Replaceable builtins provide their own implementation of a builtin. Unless
 4866 | |     // we are in the builtin implementation itself, don't call the actual
 4867 | |     // builtin. If we are in the builtin implementation, avoid trivial infinite
 4868 | |     // recursion.
4869 | 62.7k | if (!FD->isInlineBuiltinDeclaration() || |
4870 | 18 | CGF.CurFn->getName() == FD->getName()) |
4871 | 62.7k | return CGCallee::forBuiltin(builtinID, FD); |
4872 | 155k | } |
4873 | | |
4874 | 155k | llvm::Constant *calleePtr = EmitFunctionDeclPointer(CGF.CGM, GD); |
4875 | 155k | return CGCallee::forDirect(calleePtr, GD); |
4876 | 155k | } |
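
// --- Editorial sketch, simplified; treat as an assumption, not the exact
// pattern LLVM tests for. ---
// An "inline builtin declaration" as guarded above: with gnu_inline plus
// always_inline, calls to memcpy elsewhere may use this body, while inside a
// function literally named memcpy the CurFn name comparison above falls back
// to the real builtin instead of recursing.
extern "C" inline __attribute__((gnu_inline, always_inline))
void *memcpy(void *dst, const void *src, __SIZE_TYPE__ n) {
  return __builtin_memcpy(dst, src, n);
}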
4877 | | |
4878 | 441k | CGCallee CodeGenFunction::EmitCallee(const Expr *E) { |
4879 | 441k | E = E->IgnoreParens(); |
4880 | | |
4881 | | // Look through function-to-pointer decay. |
4882 | 441k | if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) { |
4883 | 222k | if (ICE->getCastKind() == CK_FunctionToPointerDecay || |
4884 | 218k | ICE->getCastKind() == CK_BuiltinFnToFnPtr57.2k ) { |
4885 | 218k | return EmitCallee(ICE->getSubExpr()); |
4886 | 218k | } |
4887 | | |
4888 | | // Resolve direct calls. |
4889 | 218k | } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) { |
4890 | 218k | if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) { |
4891 | 218k | return EmitDirectCallee(*this, FD); |
4892 | 218k | } |
4893 | 617 | } else if (auto ME = dyn_cast<MemberExpr>(E)) { |
4894 | 139 | if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) { |
4895 | 139 | EmitIgnoredExpr(ME->getBase()); |
4896 | 139 | return EmitDirectCallee(*this, FD); |
4897 | 139 | } |
4898 | | |
4899 | | // Look through template substitutions. |
4900 | 478 | } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) { |
4901 | 14 | return EmitCallee(NTTP->getReplacement()); |
4902 | | |
4903 | | // Treat pseudo-destructor calls differently. |
4904 | 464 | } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) { |
4905 | 183 | return CGCallee::forPseudoDestructor(PDE); |
4906 | 183 | } |
4907 | | |
4908 | | // Otherwise, we have an indirect reference. |
4909 | 4.69k | llvm::Value *calleePtr; |
4910 | 4.69k | QualType functionType; |
4911 | 4.69k | if (auto ptrType = E->getType()->getAs<PointerType>()) { |
4912 | 4.67k | calleePtr = EmitScalarExpr(E); |
4913 | 4.67k | functionType = ptrType->getPointeeType(); |
4914 | 24 | } else { |
4915 | 24 | functionType = E->getType(); |
4916 | 24 | calleePtr = EmitLValue(E).getPointer(*this); |
4917 | 24 | } |
4918 | 4.69k | assert(functionType->isFunctionType()); |
4919 | | |
4920 | 4.69k | GlobalDecl GD; |
4921 | 4.69k | if (const auto *VD = |
4922 | 2.67k | dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee())) |
4923 | 2.67k | GD = GlobalDecl(VD); |
4924 | | |
4925 | 4.69k | CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD); |
4926 | 4.69k | CGCallee callee(calleeInfo, calleePtr); |
4927 | 4.69k | return callee; |
4928 | 4.69k | } |
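
// --- Editorial sketch, hypothetical names. ---
// Direct versus indirect callees as resolved above:
int sketchF(int v) { return v + 1; }

int sketchDispatch(int (*fp)(int), int x) {
  int direct = sketchF(x); // DeclRefExpr of a FunctionDecl -> direct callee
  int indirect = fp(x);    // pointer-typed callee -> EmitScalarExpr, indirect
  return direct + indirect;
}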
4929 | | |
4930 | 116k | LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) { |
4931 | | // Comma expressions just emit their LHS then their RHS as an l-value. |
4932 | 116k | if (E->getOpcode() == BO_Comma) { |
4933 | 539 | EmitIgnoredExpr(E->getLHS()); |
4934 | 539 | EnsureInsertPoint(); |
4935 | 539 | return EmitLValue(E->getRHS()); |
4936 | 539 | } |
4937 | | |
4938 | 116k | if (E->getOpcode() == BO_PtrMemD || |
4939 | 116k | E->getOpcode() == BO_PtrMemI) |
4940 | 81 | return EmitPointerToDataMemberBinaryExpr(E); |
4941 | | |
4942 | 116k | assert(E->getOpcode() == BO_Assign && "unexpected binary l-value"); |
4943 | | |
4944 | | // Note that in all of these cases, __block variables need the RHS |
4945 | | // evaluated first just in case the variable gets moved by the RHS. |
4946 | | |
4947 | 116k | switch (getEvaluationKind(E->getType())) { |
4948 | 116k | case TEK_Scalar: { |
4949 | 116k | switch (E->getLHS()->getType().getObjCLifetime()) { |
4950 | 29 | case Qualifiers::OCL_Strong: |
4951 | 29 | return EmitARCStoreStrong(E, /*ignored*/ false).first; |
4952 | | |
4953 | 0 | case Qualifiers::OCL_Autoreleasing: |
4954 | 0 | return EmitARCStoreAutoreleasing(E).first; |
4955 | | |
4956 | | // No reason to do any of these differently. |
4957 | 116k | case Qualifiers::OCL_None: |
4958 | 116k | case Qualifiers::OCL_ExplicitNone: |
4959 | 116k | case Qualifiers::OCL_Weak: |
4960 | 116k | break; |
4961 | 116k | } |
4962 | | |
4963 | 116k | RValue RV = EmitAnyExpr(E->getRHS()); |
4964 | 116k | LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store); |
4965 | 116k | if (RV.isScalar()) |
4966 | 116k | EmitNullabilityCheck(LV, RV.getScalarVal(), E->getExprLoc()); |
4967 | 116k | EmitStoreThroughLValue(RV, LV); |
4968 | 116k | if (getLangOpts().OpenMP) |
4969 | 81.2k | CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this, |
4970 | 81.2k | E->getLHS()); |
4971 | 116k | return LV; |
4972 | 116k | } |
4973 | | |
4974 | 31 | case TEK_Complex: |
4975 | 31 | return EmitComplexAssignmentLValue(E); |
4976 | | |
4977 | 6 | case TEK_Aggregate: |
4978 | 6 | return EmitAggExprToLValue(E); |
4979 | 0 | } |
4980 | 0 | llvm_unreachable("bad evaluation kind"); |
4981 | 0 | } |
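
// --- Editorial sketch. ---
// Binary operators that are themselves lvalues in C++, matching the comma
// and assignment cases handled above:
int sketchChain(int a, int b) {
  int r = 0;
  (r = a) = b;      // BO_Assign used as an lvalue; r ends up equal to b
  ((void)a, r) = 7; // BO_Comma: LHS emitted for effect, RHS is the lvalue
  return r;         // returns 7
}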
4982 | | |
4983 | 44.1k | LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) { |
4984 | 44.1k | RValue RV = EmitCallExpr(E); |
4985 | | |
4986 | 44.1k | if (!RV.isScalar()) |
4987 | 16 | return MakeAddrLValue(RV.getAggregateAddress(), E->getType(), |
4988 | 16 | AlignmentSource::Decl); |
4989 | | |
4990 | 44.1k | assert(E->getCallReturnType(getContext())->isReferenceType() && |
4991 | 44.1k | "Can't have a scalar return unless the return type is a " |
4992 | 44.1k | "reference type!"); |
4993 | | |
4994 | 44.1k | return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType()); |
4995 | 44.1k | } |
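
// --- Editorial sketch, hypothetical function. ---
// A call is an lvalue only when it returns a reference (or an aggregate
// handled above), matching the assert on the scalar path:
int &sketchPick(int &a, int &b) { return a < b ? a : b; }

void sketchBump(int &x, int &y) {
  sketchPick(x, y) += 1; // reference return emitted via EmitCallExprLValue
}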
4996 | | |
4997 | 11 | LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) { |
4998 | | // FIXME: This shouldn't require another copy. |
4999 | 11 | return EmitAggExprToLValue(E); |
5000 | 11 | } |
5001 | | |
5002 | 1 | LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) { |
5003 | 1 | assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor() |
5004 | 1 | && "binding l-value to type which needs a temporary"); |
5005 | 1 | AggValueSlot Slot = CreateAggTemp(E->getType()); |
5006 | 1 | EmitCXXConstructExpr(E, Slot); |
5007 | 1 | return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl); |
5008 | 1 | } |
5009 | | |
5010 | | LValue |
5011 | 338 | CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) { |
5012 | 338 | return MakeNaturalAlignAddrLValue(EmitCXXTypeidExpr(E), E->getType()); |
5013 | 338 | } |
5014 | | |
5015 | 23 | Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) { |
5016 | 23 | return Builder.CreateElementBitCast(CGM.GetAddrOfMSGuidDecl(E->getGuidDecl()), |
5017 | 23 | ConvertType(E->getType())); |
5018 | 23 | } |
5019 | | |
5020 | 23 | LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) { |
5021 | 23 | return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType(), |
5022 | 23 | AlignmentSource::Decl); |
5023 | 23 | } |
5024 | | |
5025 | | LValue |
5026 | 3 | CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) { |
5027 | 3 | AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue"); |
5028 | 3 | Slot.setExternallyDestructed(); |
5029 | 3 | EmitAggExpr(E->getSubExpr(), Slot); |
5030 | 3 | EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddress()); |
5031 | 3 | return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl); |
5032 | 3 | } |
5033 | | |
5034 | 20 | LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) { |
5035 | 20 | RValue RV = EmitObjCMessageExpr(E); |
5036 | | |
5037 | 20 | if (!RV.isScalar()) |
5038 | 7 | return MakeAddrLValue(RV.getAggregateAddress(), E->getType(), |
5039 | 7 | AlignmentSource::Decl); |
5040 | | |
5041 | 13 | assert(E->getMethodDecl()->getReturnType()->isReferenceType() && |
5042 | 13 | "Can't have a scalar return unless the return type is a " |
5043 | 13 | "reference type!"); |
5044 | | |
5045 | 13 | return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType()); |
5046 | 13 | } |
5047 | | |
5048 | 1 | LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) { |
5049 | 1 | Address V = |
5050 | 1 | CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector()); |
5051 | 1 | return MakeAddrLValue(V, E->getType(), AlignmentSource::Decl); |
5052 | 1 | } |
5053 | | |
5054 | | llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface, |
5055 | 263 | const ObjCIvarDecl *Ivar) { |
5056 | 263 | return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar); |
5057 | 263 | } |
5058 | | |
5059 | | LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy, |
5060 | | llvm::Value *BaseValue, |
5061 | | const ObjCIvarDecl *Ivar, |
5062 | 2.22k | unsigned CVRQualifiers) { |
5063 | 2.22k | return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue, |
5064 | 2.22k | Ivar, CVRQualifiers); |
5065 | 2.22k | } |
5066 | | |
5067 | 1.53k | LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) { |
5068 | | // FIXME: A lot of the code below could be shared with EmitMemberExpr. |
5069 | 1.53k | llvm::Value *BaseValue = nullptr; |
5070 | 1.53k | const Expr *BaseExpr = E->getBase(); |
5071 | 1.53k | Qualifiers BaseQuals; |
5072 | 1.53k | QualType ObjectTy; |
5073 | 1.53k | if (E->isArrow()) { |
5074 | 1.52k | BaseValue = EmitScalarExpr(BaseExpr); |
5075 | 1.52k | ObjectTy = BaseExpr->getType()->getPointeeType(); |
5076 | 1.52k | BaseQuals = ObjectTy.getQualifiers(); |
5077 | 6 | } else { |
5078 | 6 | LValue BaseLV = EmitLValue(BaseExpr); |
5079 | 6 | BaseValue = BaseLV.getPointer(*this); |
5080 | 6 | ObjectTy = BaseExpr->getType(); |
5081 | 6 | BaseQuals = ObjectTy.getQualifiers(); |
5082 | 6 | } |
5083 | | |
5084 | 1.53k | LValue LV = |
5085 | 1.53k | EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(), |
5086 | 1.53k | BaseQuals.getCVRQualifiers()); |
5087 | 1.53k | setObjCGCLValueClass(getContext(), E, LV); |
5088 | 1.53k | return LV; |
5089 | 1.53k | } |
5090 | | |
5091 | 1 | LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) { |
 5092 | |   // Can only get an l-value for a statement expression returning an aggregate type.
5093 | 1 | RValue RV = EmitAnyExprToTemp(E); |
5094 | 1 | return MakeAddrLValue(RV.getAggregateAddress(), E->getType(), |
5095 | 1 | AlignmentSource::Decl); |
5096 | 1 | } |
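
// --- Editorial sketch. ---
// A GNU statement expression of aggregate type, the case materialized to a
// temporary above:
struct SketchAgg { int a, b; };
int sketchUseAgg() {
  return ({ SketchAgg tmp{1, 2}; tmp; }).b; // l-value of the temporary
}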
5097 | | |
5098 | | RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee, |
5099 | | const CallExpr *E, ReturnValueSlot ReturnValue, |
5100 | 163k | llvm::Value *Chain) { |
5101 | | // Get the actual function type. The callee type will always be a pointer to |
5102 | | // function type or a block pointer type. |
5103 | 163k | assert(CalleeType->isFunctionPointerType() && |
5104 | 163k | "Call must have function pointer type!"); |
5105 | | |
5106 | 163k | const Decl *TargetDecl = |
5107 | 163k | OrigCallee.getAbstractInfo().getCalleeDecl().getDecl(); |
5108 | | |
5109 | 163k | CalleeType = getContext().getCanonicalType(CalleeType); |
5110 | | |
5111 | 163k | auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType(); |
5112 | | |
5113 | 163k | CGCallee Callee = OrigCallee; |
5114 | | |
5115 | 163k | if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)109k && |
5116 | 25 | (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) { |
5117 | 8 | if (llvm::Constant *PrefixSig = |
5118 | 8 | CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) { |
5119 | 8 | SanitizerScope SanScope(this); |
5120 | | // Remove any (C++17) exception specifications, to allow calling e.g. a |
5121 | | // noexcept function through a non-noexcept pointer. |
5122 | 8 | auto ProtoTy = |
5123 | 8 | getContext().getFunctionTypeWithExceptionSpec(PointeeType, EST_None); |
5124 | 8 | llvm::Constant *FTRTTIConst = |
5125 | 8 | CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true); |
5126 | 8 | llvm::Type *PrefixStructTyElems[] = {PrefixSig->getType(), Int32Ty}; |
5127 | 8 | llvm::StructType *PrefixStructTy = llvm::StructType::get( |
5128 | 8 | CGM.getLLVMContext(), PrefixStructTyElems, /*isPacked=*/true); |
5129 | | |