/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/CodeGen/CGObjC.cpp
Line | Count | Source |
1 | | //===---- CGObjC.cpp - Emit LLVM Code for Objective-C ---------------------===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | // |
9 | | // This contains code to emit Objective-C code as LLVM code. |
10 | | // |
11 | | //===----------------------------------------------------------------------===// |
12 | | |
13 | | #include "CGDebugInfo.h" |
14 | | #include "CGObjCRuntime.h" |
15 | | #include "CodeGenFunction.h" |
16 | | #include "CodeGenModule.h" |
17 | | #include "ConstantEmitter.h" |
18 | | #include "TargetInfo.h" |
19 | | #include "clang/AST/ASTContext.h" |
20 | | #include "clang/AST/Attr.h" |
21 | | #include "clang/AST/DeclObjC.h" |
22 | | #include "clang/AST/StmtObjC.h" |
23 | | #include "clang/Basic/Diagnostic.h" |
24 | | #include "clang/CodeGen/CGFunctionInfo.h" |
25 | | #include "llvm/ADT/STLExtras.h" |
26 | | #include "llvm/Analysis/ObjCARCUtil.h" |
27 | | #include "llvm/BinaryFormat/MachO.h" |
28 | | #include "llvm/IR/DataLayout.h" |
29 | | #include "llvm/IR/InlineAsm.h" |
30 | | using namespace clang; |
31 | | using namespace CodeGen; |
32 | | |
33 | | typedef llvm::PointerIntPair<llvm::Value*,1,bool> TryEmitResult; |
34 | | static TryEmitResult |
35 | | tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e); |
36 | | static RValue AdjustObjCObjectType(CodeGenFunction &CGF, |
37 | | QualType ET, |
38 | | RValue Result); |
39 | | |
40 | | /// Given the address of a variable of pointer type, find the correct |
41 | | /// null to store into it. |
42 | 1.06k | static llvm::Constant *getNullForVariable(Address addr) { |
43 | 1.06k | llvm::Type *type = addr.getElementType(); |
44 | 1.06k | return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(type)); |
45 | 1.06k | } |
46 | | |
47 | | /// Emits an instance of NSConstantString representing the object. |
48 | | llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E) |
49 | 4.62k | { |
50 | 4.62k | llvm::Constant *C = |
51 | 4.62k | CGM.getObjCRuntime().GenerateConstantString(E->getString()).getPointer(); |
52 | | // FIXME: This bitcast should just be made an invariant on the Runtime. |
53 | 4.62k | return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType())); |
54 | 4.62k | } |
55 | | |
56 | | /// EmitObjCBoxedExpr - This routine generates code to call |
57 | | /// the appropriate expression boxing method. This will either be |
58 | | /// one of +[NSNumber numberWith<Type>:], or +[NSString stringWithUTF8String:], |
59 | | /// or [NSValue valueWithBytes:objCType:]. |
60 | | /// |
61 | | llvm::Value * |
62 | 730 | CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) { |
63 | | // Generate the correct selector for this literal's concrete type. |
64 | | // Get the method. |
65 | 730 | const ObjCMethodDecl *BoxingMethod = E->getBoxingMethod(); |
66 | 730 | const Expr *SubExpr = E->getSubExpr(); |
67 | | |
68 | 730 | if (E->isExpressibleAsConstantInitializer()) { |
69 | 3 | ConstantEmitter ConstEmitter(CGM); |
70 | 3 | return ConstEmitter.tryEmitAbstract(E, E->getType()); |
71 | 3 | } |
72 | | |
73 | 727 | assert(BoxingMethod->isClassMethod() && "BoxingMethod must be a class method"); |
74 | 0 | Selector Sel = BoxingMethod->getSelector(); |
75 | | |
76 | | // Generate a reference to the class pointer, which will be the receiver. |
77 | | // Assumes that the method was introduced in the class that should be |
78 | | // messaged (avoids pulling it out of the result type). |
79 | 727 | CGObjCRuntime &Runtime = CGM.getObjCRuntime(); |
80 | 727 | const ObjCInterfaceDecl *ClassDecl = BoxingMethod->getClassInterface(); |
81 | 727 | llvm::Value *Receiver = Runtime.GetClass(*this, ClassDecl); |
82 | | |
83 | 727 | CallArgList Args; |
84 | 727 | const ParmVarDecl *ArgDecl = *BoxingMethod->param_begin(); |
85 | 727 | QualType ArgQT = ArgDecl->getType().getUnqualifiedType(); |
86 | | |
87 | | // ObjCBoxedExpr supports boxing of structs and unions |
88 | | // via [NSValue valueWithBytes:objCType:] |
89 | 727 | const QualType ValueType(SubExpr->getType().getCanonicalType()); |
90 | 727 | if (ValueType->isObjCBoxableRecordType()) { |
91 | | // Emit CodeGen for first parameter |
92 | | // and cast value to correct type |
93 | 26 | Address Temporary = CreateMemTemp(SubExpr->getType()); |
94 | 26 | EmitAnyExprToMem(SubExpr, Temporary, Qualifiers(), /*isInit*/ true); |
95 | 26 | llvm::Value *BitCast = |
96 | 26 | Builder.CreateBitCast(Temporary.getPointer(), ConvertType(ArgQT)); |
97 | 26 | Args.add(RValue::get(BitCast), ArgQT); |
98 | | |
99 | | // Create char array to store type encoding |
100 | 26 | std::string Str; |
101 | 26 | getContext().getObjCEncodingForType(ValueType, Str); |
102 | 26 | llvm::Constant *GV = CGM.GetAddrOfConstantCString(Str).getPointer(); |
103 | | |
104 | | // Cast type encoding to correct type |
105 | 26 | const ParmVarDecl *EncodingDecl = BoxingMethod->parameters()[1]; |
106 | 26 | QualType EncodingQT = EncodingDecl->getType().getUnqualifiedType(); |
107 | 26 | llvm::Value *Cast = Builder.CreateBitCast(GV, ConvertType(EncodingQT)); |
108 | | |
109 | 26 | Args.add(RValue::get(Cast), EncodingQT); |
110 | 701 | } else { |
111 | 701 | Args.add(EmitAnyExpr(SubExpr), ArgQT); |
112 | 701 | } |
113 | | |
114 | 727 | RValue result = Runtime.GenerateMessageSend( |
115 | 727 | *this, ReturnValueSlot(), BoxingMethod->getReturnType(), Sel, Receiver, |
116 | 727 | Args, ClassDecl, BoxingMethod); |
117 | 727 | return Builder.CreateBitCast(result.getScalarVal(), |
118 | 727 | ConvertType(E->getType())); |
119 | 730 | } |
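// Illustrative sketch of the two boxing paths handled above (the 'Point'
// type is hypothetical and needs the objc_boxable attribute):
//
//   typedef struct __attribute__((objc_boxable)) { int x, y; } Point;
//
//   NSNumber *num = @(42);    // -> [NSNumber numberWithInt:42]
//   NSString *str = @("hi");  // -> [NSString stringWithUTF8String:"hi"]
//   Point p = {1, 2};
//   NSValue *val = @(p);      // -> [NSValue valueWithBytes:&p
//                             //            objCType:@encode(Point)]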
120 | | |
121 | | llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E, |
122 | 226 | const ObjCMethodDecl *MethodWithObjects) { |
123 | 226 | ASTContext &Context = CGM.getContext(); |
124 | 226 | const ObjCDictionaryLiteral *DLE = nullptr; |
125 | 226 | const ObjCArrayLiteral *ALE = dyn_cast<ObjCArrayLiteral>(E); |
126 | 226 | if (!ALE) |
127 | 107 | DLE = cast<ObjCDictionaryLiteral>(E); |
128 | | |
129 | | // Optimize empty collections by referencing constants, when available. |
130 | 226 | uint64_t NumElements = |
131 | 226 | ALE ? ALE->getNumElements() : DLE->getNumElements();
132 | 226 | if (NumElements == 0 && CGM.getLangOpts().ObjCRuntime.hasEmptyCollections()) {
133 | 11 | StringRef ConstantName = ALE ? "__NSArray0__" : "__NSDictionary0__";
134 | 11 | QualType IdTy(CGM.getContext().getObjCIdType()); |
135 | 11 | llvm::Constant *Constant = |
136 | 11 | CGM.CreateRuntimeVariable(ConvertType(IdTy), ConstantName); |
137 | 11 | LValue LV = MakeNaturalAlignAddrLValue(Constant, IdTy); |
138 | 11 | llvm::Value *Ptr = EmitLoadOfScalar(LV, E->getBeginLoc()); |
139 | 11 | cast<llvm::LoadInst>(Ptr)->setMetadata( |
140 | 11 | CGM.getModule().getMDKindID("invariant.load"), |
141 | 11 | llvm::MDNode::get(getLLVMContext(), None)); |
142 | 11 | return Builder.CreateBitCast(Ptr, ConvertType(E->getType())); |
143 | 11 | } |
144 | | |
145 | | // Compute the type of the array we're initializing. |
146 | 215 | llvm::APInt APNumElements(Context.getTypeSize(Context.getSizeType()), |
147 | 215 | NumElements); |
148 | 215 | QualType ElementType = Context.getObjCIdType().withConst(); |
149 | 215 | QualType ElementArrayType |
150 | 215 | = Context.getConstantArrayType(ElementType, APNumElements, nullptr, |
151 | 215 | ArrayType::Normal, /*IndexTypeQuals=*/0); |
152 | | |
153 | | // Allocate the temporary array(s). |
154 | 215 | Address Objects = CreateMemTemp(ElementArrayType, "objects"); |
155 | 215 | Address Keys = Address::invalid(); |
156 | 215 | if (DLE) |
157 | 103 | Keys = CreateMemTemp(ElementArrayType, "keys"); |
158 | | |
159 | | // In ARC, we may need to do extra work to keep all the keys and |
160 | | // values alive until after the call. |
161 | 215 | SmallVector<llvm::Value *, 16> NeededObjects; |
162 | 215 | bool TrackNeededObjects = |
163 | 215 | (getLangOpts().ObjCAutoRefCount && |
164 | 215 | CGM.getCodeGenOpts().OptimizationLevel != 0);
165 | | |
166 | | // Perform the actual initialization of the array(s).
167 | 656 | for (uint64_t i = 0; i < NumElements; i++) {
168 | 441 | if (ALE) { |
169 | | // Emit the element and store it to the appropriate array slot. |
170 | 278 | const Expr *Rhs = ALE->getElement(i); |
171 | 278 | LValue LV = MakeAddrLValue(Builder.CreateConstArrayGEP(Objects, i), |
172 | 278 | ElementType, AlignmentSource::Decl); |
173 | | |
174 | 278 | llvm::Value *value = EmitScalarExpr(Rhs); |
175 | 278 | EmitStoreThroughLValue(RValue::get(value), LV, true); |
176 | 278 | if (TrackNeededObjects) { |
177 | 7 | NeededObjects.push_back(value); |
178 | 7 | } |
179 | 278 | } else { |
180 | | // Emit the key and store it to the appropriate array slot. |
181 | 163 | const Expr *Key = DLE->getKeyValueElement(i).Key; |
182 | 163 | LValue KeyLV = MakeAddrLValue(Builder.CreateConstArrayGEP(Keys, i), |
183 | 163 | ElementType, AlignmentSource::Decl); |
184 | 163 | llvm::Value *keyValue = EmitScalarExpr(Key); |
185 | 163 | EmitStoreThroughLValue(RValue::get(keyValue), KeyLV, /*isInit=*/true); |
186 | | |
187 | | // Emit the value and store it to the appropriate array slot. |
188 | 163 | const Expr *Value = DLE->getKeyValueElement(i).Value; |
189 | 163 | LValue ValueLV = MakeAddrLValue(Builder.CreateConstArrayGEP(Objects, i), |
190 | 163 | ElementType, AlignmentSource::Decl); |
191 | 163 | llvm::Value *valueValue = EmitScalarExpr(Value); |
192 | 163 | EmitStoreThroughLValue(RValue::get(valueValue), ValueLV, /*isInit=*/true); |
193 | 163 | if (TrackNeededObjects) { |
194 | 2 | NeededObjects.push_back(keyValue); |
195 | 2 | NeededObjects.push_back(valueValue); |
196 | 2 | } |
197 | 163 | } |
198 | 441 | } |
199 | | |
200 | | // Generate the argument list. |
201 | 215 | CallArgList Args; |
202 | 215 | ObjCMethodDecl::param_const_iterator PI = MethodWithObjects->param_begin(); |
203 | 215 | const ParmVarDecl *argDecl = *PI++; |
204 | 215 | QualType ArgQT = argDecl->getType().getUnqualifiedType(); |
205 | 215 | Args.add(RValue::get(Objects.getPointer()), ArgQT); |
206 | 215 | if (DLE) { |
207 | 103 | argDecl = *PI++; |
208 | 103 | ArgQT = argDecl->getType().getUnqualifiedType(); |
209 | 103 | Args.add(RValue::get(Keys.getPointer()), ArgQT); |
210 | 103 | } |
211 | 215 | argDecl = *PI; |
212 | 215 | ArgQT = argDecl->getType().getUnqualifiedType(); |
213 | 215 | llvm::Value *Count = |
214 | 215 | llvm::ConstantInt::get(CGM.getTypes().ConvertType(ArgQT), NumElements); |
215 | 215 | Args.add(RValue::get(Count), ArgQT); |
216 | | |
217 | | // Generate a reference to the class pointer, which will be the receiver. |
218 | 215 | Selector Sel = MethodWithObjects->getSelector(); |
219 | 215 | QualType ResultType = E->getType(); |
220 | 215 | const ObjCObjectPointerType *InterfacePointerType |
221 | 215 | = ResultType->getAsObjCInterfacePointerType(); |
222 | 215 | ObjCInterfaceDecl *Class |
223 | 215 | = InterfacePointerType->getObjectType()->getInterface(); |
224 | 215 | CGObjCRuntime &Runtime = CGM.getObjCRuntime(); |
225 | 215 | llvm::Value *Receiver = Runtime.GetClass(*this, Class); |
226 | | |
227 | | // Generate the message send. |
228 | 215 | RValue result = Runtime.GenerateMessageSend( |
229 | 215 | *this, ReturnValueSlot(), MethodWithObjects->getReturnType(), Sel, |
230 | 215 | Receiver, Args, Class, MethodWithObjects); |
231 | | |
232 | | // The above message send needs these objects, but in ARC they are |
233 | | // passed in a buffer that is essentially __unsafe_unretained. |
234 | | // Therefore we must prevent the optimizer from releasing them until |
235 | | // after the call. |
236 | 215 | if (TrackNeededObjects) { |
237 | 5 | EmitARCIntrinsicUse(NeededObjects); |
238 | 5 | } |
239 | | |
240 | 215 | return Builder.CreateBitCast(result.getScalarVal(), |
241 | 215 | ConvertType(E->getType())); |
242 | 226 | } |
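// A sketch of what this lowers to (the Foundation selectors are the real
// ones the boxing methods resolve to; everything else is illustrative):
//
//   NSDictionary *d = @{ @"k" : @1 };
//     // keys/objects go into two stack arrays, then one class message:
//     // [NSDictionary dictionaryWithObjects:objects forKeys:keys count:1]
//
//   NSArray *e = @[];
//     // on runtimes with empty-collection constants: no message send at
//     // all, just an invariant load of the shared __NSArray0__ instance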
243 | | |
244 | 119 | llvm::Value *CodeGenFunction::EmitObjCArrayLiteral(const ObjCArrayLiteral *E) { |
245 | 119 | return EmitObjCCollectionLiteral(E, E->getArrayWithObjectsMethod()); |
246 | 119 | } |
247 | | |
248 | | llvm::Value *CodeGenFunction::EmitObjCDictionaryLiteral( |
249 | 107 | const ObjCDictionaryLiteral *E) { |
250 | 107 | return EmitObjCCollectionLiteral(E, E->getDictWithObjectsMethod()); |
251 | 107 | } |
252 | | |
253 | | /// Emit a selector. |
254 | 97 | llvm::Value *CodeGenFunction::EmitObjCSelectorExpr(const ObjCSelectorExpr *E) { |
255 | | // Untyped selector. |
256 | | // Note that this implementation allows for non-constant strings to be passed |
257 | | // as arguments to @selector(). Currently, the only thing preventing this |
258 | | // behaviour is the type checking in the front end. |
259 | 97 | return CGM.getObjCRuntime().GetSelector(*this, E->getSelector()); |
260 | 97 | } |
261 | | |
262 | 17 | llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) { |
263 | | // FIXME: This should pass the Decl not the name. |
264 | 17 | return CGM.getObjCRuntime().GenerateProtocolRef(*this, E->getProtocol()); |
265 | 17 | } |
266 | | |
267 | | /// Adjust the type of an Objective-C object that doesn't match up due |
268 | | /// to type erasure at various points, e.g., related result types or the use |
269 | | /// of parameterized classes. |
270 | | static RValue AdjustObjCObjectType(CodeGenFunction &CGF, QualType ExpT, |
271 | 12.3k | RValue Result) { |
272 | 12.3k | if (!ExpT->isObjCRetainableType()) |
273 | 5.36k | return Result; |
274 | | |
275 | | // If the converted types are the same, we're done. |
276 | 6.94k | llvm::Type *ExpLLVMTy = CGF.ConvertType(ExpT); |
277 | 6.94k | if (ExpLLVMTy == Result.getScalarVal()->getType()) |
278 | 6.71k | return Result; |
279 | | |
280 | | // We have applied a substitution. Cast the rvalue appropriately. |
281 | 231 | return RValue::get(CGF.Builder.CreateBitCast(Result.getScalarVal(), |
282 | 231 | ExpLLVMTy)); |
283 | 6.94k | } |
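// Sketch of the mismatch this smooths over: an init method's related result
// type is erased to 'id' in the message send, while the expression's type
// is the concrete class, so the scalar result is bitcast back:
//
//   NSMutableString *s = [[NSMutableString alloc] init];
//     // the send returns 'id'; the rvalue is cast to NSMutableString *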
284 | | |
285 | | /// Decide whether to extend the lifetime of the receiver of a |
286 | | /// returns-inner-pointer message. |
287 | | static bool |
288 | 10 | shouldExtendReceiverForInnerPointerMessage(const ObjCMessageExpr *message) { |
289 | 10 | switch (message->getReceiverKind()) { |
290 | | |
291 | | // For a normal instance message, we should extend unless the |
292 | | // receiver is loaded from a variable with precise lifetime. |
293 | 10 | case ObjCMessageExpr::Instance: { |
294 | 10 | const Expr *receiver = message->getInstanceReceiver(); |
295 | | |
296 | | // Look through OVEs. |
297 | 10 | if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) { |
298 | 4 | if (opaque->getSourceExpr()) |
299 | 4 | receiver = opaque->getSourceExpr()->IgnoreParens(); |
300 | 4 | } |
301 | | |
302 | 10 | const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(receiver); |
303 | 10 | if (!ice || ice->getCastKind() != CK_LValueToRValue) return true;
304 | 10 | receiver = ice->getSubExpr()->IgnoreParens(); |
305 | | |
306 | | // Look through OVEs. |
307 | 10 | if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) { |
308 | 0 | if (opaque->getSourceExpr()) |
309 | 0 | receiver = opaque->getSourceExpr()->IgnoreParens(); |
310 | 0 | } |
311 | | |
312 | | // Only __strong variables. |
313 | 10 | if (receiver->getType().getObjCLifetime() != Qualifiers::OCL_Strong) |
314 | 0 | return true; |
315 | | |
316 | | // All ivars and fields have precise lifetime. |
317 | 10 | if (isa<MemberExpr>(receiver) || isa<ObjCIvarRefExpr>(receiver)) |
318 | 0 | return false; |
319 | | |
320 | | // Otherwise, check for variables. |
321 | 10 | const DeclRefExpr *declRef = dyn_cast<DeclRefExpr>(ice->getSubExpr()); |
322 | 10 | if (!declRef) return true;
323 | 7 | const VarDecl *var = dyn_cast<VarDecl>(declRef->getDecl());
324 | 7 | if (!var) return true;
325 | | |
326 | | // All variables have precise lifetime except local variables with |
327 | | // automatic storage duration that aren't specially marked. |
328 | 7 | return (var->hasLocalStorage() && |
329 | 7 | !var->hasAttr<ObjCPreciseLifetimeAttr>()); |
330 | 7 | } |
331 | | |
332 | 0 | case ObjCMessageExpr::Class: |
333 | 0 | case ObjCMessageExpr::SuperClass: |
334 | | // It's never necessary for class objects. |
335 | 0 | return false; |
336 | | |
337 | 0 | case ObjCMessageExpr::SuperInstance: |
338 | | // We generally assume that 'self' lives throughout a method call. |
339 | 0 | return false; |
340 | 10 | } |
341 | | |
342 | 0 | llvm_unreachable("invalid receiver kind"); |
343 | 0 | } |
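// Example of why the extension matters (sketch; -[NSData bytes] is marked
// NS_RETURNS_INNER_POINTER in Foundation, 'makeData' is hypothetical):
//
//   NSData *data = [self makeData];   // local __strong, imprecise lifetime
//   const void *p = [data bytes];     // receiver gets retain+autorelease
//   use(p);                           // p must not outlive 'data'
//
// Without the retain+autorelease, ARC could release 'data' immediately
// after the -bytes call, leaving 'p' dangling.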
344 | | |
345 | | /// Given an expression of ObjC pointer type, check whether it was |
346 | | /// immediately loaded from an ARC __weak l-value. |
347 | 53 | static const Expr *findWeakLValue(const Expr *E) { |
348 | 53 | assert(E->getType()->isObjCRetainableType()); |
349 | 0 | E = E->IgnoreParens(); |
350 | 53 | if (auto CE = dyn_cast<CastExpr>(E)) { |
351 | 50 | if (CE->getCastKind() == CK_LValueToRValue) { |
352 | 50 | if (CE->getSubExpr()->getType().getObjCLifetime() == Qualifiers::OCL_Weak) |
353 | 8 | return CE->getSubExpr(); |
354 | 50 | } |
355 | 50 | } |
356 | | |
357 | 45 | return nullptr; |
358 | 53 | } |
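// The peephole this feeds (sketch; an explicit -retain send implies MRC
// source compiled with weak references enabled):
//
//   __weak id w = obj;
//   id s = [w retain];
//     // becomes a single objc_loadWeakRetained(&w) instead of a weak load
//     // followed by a retain message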
359 | | |
360 | | /// The ObjC runtime may provide entrypoints that are likely to be faster |
361 | | /// than an ordinary message send of the appropriate selector. |
362 | | /// |
363 | | /// The entrypoints are guaranteed to be equivalent to just sending the |
364 | | /// corresponding message. If the entrypoint is implemented naively as just a |
365 | | /// message send, using it is a trade-off: it sacrifices a few cycles of |
366 | | /// overhead to save a small amount of code. However, it's possible for |
367 | | /// runtimes to detect and special-case classes that use "standard" |
368 | | /// behavior; if that's dynamically a large proportion of all objects, using |
369 | | /// the entrypoint will also be faster than using a message send. |
370 | | /// |
371 | | /// If the runtime does support a required entrypoint, then this method will |
372 | | /// generate a call and return the resulting value. Otherwise it will return |
373 | | /// None and the caller can generate a msgSend instead. |
374 | | static Optional<llvm::Value *> |
375 | | tryGenerateSpecializedMessageSend(CodeGenFunction &CGF, QualType ResultType, |
376 | | llvm::Value *Receiver, |
377 | | const CallArgList& Args, Selector Sel, |
378 | | const ObjCMethodDecl *method, |
379 | 11.4k | bool isClassMessage) { |
380 | 11.4k | auto &CGM = CGF.CGM; |
381 | 11.4k | if (!CGM.getCodeGenOpts().ObjCConvertMessagesToRuntimeCalls) |
382 | 30 | return None; |
383 | | |
384 | 11.4k | auto &Runtime = CGM.getLangOpts().ObjCRuntime; |
385 | 11.4k | switch (Sel.getMethodFamily()) { |
386 | 1.05k | case OMF_alloc: |
387 | 1.05k | if (isClassMessage && |
388 | 1.05k | Runtime.shouldUseRuntimeFunctionsForAlloc() &&
389 | 1.05k | ResultType->isObjCObjectPointerType()) {
390 | | // [Foo alloc] -> objc_alloc(Foo) or |
391 | | // [self alloc] -> objc_alloc(self) |
392 | 927 | if (Sel.isUnarySelector() && Sel.getNameForSlot(0) == "alloc")
393 | 886 | return CGF.EmitObjCAlloc(Receiver, CGF.ConvertType(ResultType)); |
394 | | // [Foo allocWithZone:nil] -> objc_allocWithZone(Foo) or |
395 | | // [self allocWithZone:nil] -> objc_allocWithZone(self) |
396 | 41 | if (Sel.isKeywordSelector() && Sel.getNumArgs() == 1 &&
397 | 41 | Args.size() == 1 && Args.front().getType()->isPointerType() &&
398 | 41 | Sel.getNameForSlot(0) == "allocWithZone") {
399 | 33 | const llvm::Value* arg = Args.front().getKnownRValue().getScalarVal(); |
400 | 33 | if (isa<llvm::ConstantPointerNull>(arg)) |
401 | 25 | return CGF.EmitObjCAllocWithZone(Receiver, |
402 | 25 | CGF.ConvertType(ResultType)); |
403 | 8 | return None; |
404 | 33 | } |
405 | 41 | } |
406 | 138 | break; |
407 | | |
408 | 138 | case OMF_autorelease: |
409 | 53 | if (ResultType->isObjCObjectPointerType() && |
410 | 53 | CGM.getLangOpts().getGC() == LangOptions::NonGC && |
411 | 53 | Runtime.shouldUseARCFunctionsForRetainRelease()) |
412 | 43 | return CGF.EmitObjCAutorelease(Receiver, CGF.ConvertType(ResultType)); |
413 | 10 | break; |
414 | | |
415 | 40 | case OMF_retain: |
416 | 40 | if (ResultType->isObjCObjectPointerType() && |
417 | 40 | CGM.getLangOpts().getGC() == LangOptions::NonGC &&
418 | 40 | Runtime.shouldUseARCFunctionsForRetainRelease())
419 | 17 | return CGF.EmitObjCRetainNonBlock(Receiver, CGF.ConvertType(ResultType)); |
420 | 23 | break; |
421 | | |
422 | 196 | case OMF_release: |
423 | 196 | if (ResultType->isVoidType() && |
424 | 196 | CGM.getLangOpts().getGC() == LangOptions::NonGC &&
425 | 196 | Runtime.shouldUseARCFunctionsForRetainRelease()) {
426 | 186 | CGF.EmitObjCRelease(Receiver, ARCPreciseLifetime); |
427 | 186 | return nullptr; |
428 | 186 | } |
429 | 10 | break; |
430 | | |
431 | 10.0k | default: |
432 | 10.0k | break; |
433 | 11.4k | } |
434 | 10.2k | return None; |
435 | 11.4k | } |
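// Summary of the rewrites above (sketch; each applies only when the
// runtime supports the entrypoint and the result/argument shapes match):
//
//   [Foo alloc]             -> objc_alloc(Foo)
//   [Foo allocWithZone:nil] -> objc_allocWithZone(Foo)
//   [obj retain]            -> objc_retain(obj)        // MRC source
//   [obj release]           -> objc_release(obj)       // MRC source
//   [obj autorelease]       -> objc_autorelease(obj)   // MRC source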
436 | | |
437 | | CodeGen::RValue CGObjCRuntime::GeneratePossiblySpecializedMessageSend( |
438 | | CodeGenFunction &CGF, ReturnValueSlot Return, QualType ResultType, |
439 | | Selector Sel, llvm::Value *Receiver, const CallArgList &Args, |
440 | | const ObjCInterfaceDecl *OID, const ObjCMethodDecl *Method, |
441 | 11.4k | bool isClassMessage) { |
442 | 11.4k | if (Optional<llvm::Value *> SpecializedResult = |
443 | 11.4k | tryGenerateSpecializedMessageSend(CGF, ResultType, Receiver, Args, |
444 | 11.4k | Sel, Method, isClassMessage)) { |
445 | 1.15k | return RValue::get(*SpecializedResult); |
446 | 1.15k | } |
447 | 10.2k | return GenerateMessageSend(CGF, Return, ResultType, Sel, Receiver, Args, OID, |
448 | 10.2k | Method); |
449 | 11.4k | } |
450 | | |
451 | | static void AppendFirstImpliedRuntimeProtocols( |
452 | | const ObjCProtocolDecl *PD, |
453 | 51 | llvm::UniqueVector<const ObjCProtocolDecl *> &PDs) { |
454 | 51 | if (!PD->isNonRuntimeProtocol()) { |
455 | 23 | const auto *Can = PD->getCanonicalDecl(); |
456 | 23 | PDs.insert(Can); |
457 | 23 | return; |
458 | 23 | } |
459 | | |
460 | 28 | for (const auto *ParentPD : PD->protocols()) |
461 | 33 | AppendFirstImpliedRuntimeProtocols(ParentPD, PDs); |
462 | 28 | } |
463 | | |
464 | | std::vector<const ObjCProtocolDecl *> |
465 | | CGObjCRuntime::GetRuntimeProtocolList(ObjCProtocolDecl::protocol_iterator begin, |
466 | 2.36k | ObjCProtocolDecl::protocol_iterator end) { |
467 | 2.36k | std::vector<const ObjCProtocolDecl *> RuntimePds; |
468 | 2.36k | llvm::DenseSet<const ObjCProtocolDecl *> NonRuntimePDs; |
469 | | |
470 | 2.49k | for (; begin != end; ++begin132 ) { |
471 | 132 | const auto *It = *begin; |
472 | 132 | const auto *Can = It->getCanonicalDecl(); |
473 | 132 | if (Can->isNonRuntimeProtocol()) |
474 | 18 | NonRuntimePDs.insert(Can); |
475 | 114 | else |
476 | 114 | RuntimePds.push_back(Can); |
477 | 132 | } |
478 | | |
479 | | // If there are no non-runtime protocols then we can just stop now. |
480 | 2.36k | if (NonRuntimePDs.empty()) |
481 | 2.35k | return RuntimePds; |
482 | | |
483 | | // Else we have to search through the non-runtime protocol's inheritance
484 | | // hierarchy DAG stopping whenever a branch either finds a runtime protocol or |
485 | | // a non-runtime protocol without any parents. These are the "first-implied" |
486 | | // protocols from a non-runtime protocol. |
487 | 14 | llvm::UniqueVector<const ObjCProtocolDecl *> FirstImpliedProtos; |
488 | 14 | for (const auto *PD : NonRuntimePDs) |
489 | 18 | AppendFirstImpliedRuntimeProtocols(PD, FirstImpliedProtos); |
490 | | |
491 | | // Walk the Runtime list to get all protocols implied via the inclusion of |
492 | | // this protocol, e.g. all protocols it inherits from including itself. |
493 | 14 | llvm::DenseSet<const ObjCProtocolDecl *> AllImpliedProtocols; |
494 | 14 | for (const auto *PD : RuntimePds) { |
495 | 9 | const auto *Can = PD->getCanonicalDecl(); |
496 | 9 | AllImpliedProtocols.insert(Can); |
497 | 9 | Can->getImpliedProtocols(AllImpliedProtocols); |
498 | 9 | } |
499 | | |
500 | | // Similar to above, walk the list of first-implied protocols to find the set
501 | | // of all the protocols implied, excluding the listed protocols themselves since
502 | | // they are not yet a part of the `RuntimePds` list. |
503 | 18 | for (const auto *PD : FirstImpliedProtos) { |
504 | 18 | PD->getImpliedProtocols(AllImpliedProtocols); |
505 | 18 | } |
506 | | |
507 | | // From the first-implied list we have to finish building the final protocol |
508 | | // list. If a protocol in the first-implied list was already implied via some |
509 | | // inheritance path through some other protocols then it would be redundant to |
510 | | // add it here and so we skip over it. |
511 | 18 | for (const auto *PD : FirstImpliedProtos) { |
512 | 18 | if (!AllImpliedProtocols.contains(PD)) { |
513 | 9 | RuntimePds.push_back(PD); |
514 | 9 | } |
515 | 18 | } |
516 | | |
517 | 14 | return RuntimePds; |
518 | 2.36k | } |
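// Worked example (hypothetical protocols; N carries the
// objc_non_runtime_protocol attribute):
//
//   @protocol A @end
//   @protocol B <A> @end
//   __attribute__((objc_non_runtime_protocol)) @protocol N <A, B> @end
//
// For the list <N, B>: RuntimePds starts as {B}; N's first-implied set is
// {A, B}; B's implied closure already contains A, so A is redundant and
// the final emitted list is just {B}.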
519 | | |
520 | | /// Instead of '[[MyClass alloc] init]', try to generate |
521 | | /// 'objc_alloc_init(MyClass)'. This provides a code size improvement on the |
522 | | /// caller side, as well as the optimized objc_alloc. |
523 | | static Optional<llvm::Value *> |
524 | 12.3k | tryEmitSpecializedAllocInit(CodeGenFunction &CGF, const ObjCMessageExpr *OME) { |
525 | 12.3k | auto &Runtime = CGF.getLangOpts().ObjCRuntime; |
526 | 12.3k | if (!Runtime.shouldUseRuntimeFunctionForCombinedAllocInit()) |
527 | 1.99k | return None; |
528 | | |
529 | | // Match the exact pattern '[[MyClass alloc] init]'. |
530 | 10.3k | Selector Sel = OME->getSelector(); |
531 | 10.3k | if (OME->getReceiverKind() != ObjCMessageExpr::Instance || |
532 | 10.3k | !OME->getType()->isObjCObjectPointerType() || !Sel.isUnarySelector() ||
533 | 10.3k | Sel.getNameForSlot(0) != "init")
534 | 9.93k | return None; |
535 | | |
536 | | // Okay, this is '[receiver init]', check if 'receiver' is '[cls alloc]' |
537 | | // with 'cls' a Class. |
538 | 371 | auto *SubOME = |
539 | 371 | dyn_cast<ObjCMessageExpr>(OME->getInstanceReceiver()->IgnoreParenCasts()); |
540 | 371 | if (!SubOME) |
541 | 0 | return None; |
542 | 371 | Selector SubSel = SubOME->getSelector(); |
543 | | |
544 | 371 | if (!SubOME->getType()->isObjCObjectPointerType() || |
545 | 371 | !SubSel.isUnarySelector() || SubSel.getNameForSlot(0) != "alloc") |
546 | 0 | return None; |
547 | | |
548 | 371 | llvm::Value *Receiver = nullptr; |
549 | 371 | switch (SubOME->getReceiverKind()) { |
550 | 6 | case ObjCMessageExpr::Instance: |
551 | 6 | if (!SubOME->getInstanceReceiver()->getType()->isObjCClassType()) |
552 | 2 | return None; |
553 | 4 | Receiver = CGF.EmitScalarExpr(SubOME->getInstanceReceiver()); |
554 | 4 | break; |
555 | | |
556 | 365 | case ObjCMessageExpr::Class: { |
557 | 365 | QualType ReceiverType = SubOME->getClassReceiver(); |
558 | 365 | const ObjCObjectType *ObjTy = ReceiverType->castAs<ObjCObjectType>(); |
559 | 365 | const ObjCInterfaceDecl *ID = ObjTy->getInterface(); |
560 | 365 | assert(ID && "null interface should be impossible here"); |
561 | 0 | Receiver = CGF.CGM.getObjCRuntime().GetClass(CGF, ID); |
562 | 365 | break; |
563 | 6 | } |
564 | 0 | case ObjCMessageExpr::SuperInstance: |
565 | 0 | case ObjCMessageExpr::SuperClass: |
566 | 0 | return None; |
567 | 371 | } |
568 | | |
569 | 369 | return CGF.EmitObjCAllocInit(Receiver, CGF.ConvertType(OME->getType())); |
570 | 371 | } |
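// The exact pattern matched above, and what it becomes (sketch; MyClass is
// hypothetical):
//
//   MyClass *obj = [[MyClass alloc] init];
//     // -> one objc_alloc_init(cls) call instead of two message sends
//
// Anything else, e.g. [[MyClass alloc] initWithName:n], falls through to
// the ordinary message-send path.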
571 | | |
572 | | RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E, |
573 | 12.3k | ReturnValueSlot Return) { |
574 | | // Only the lookup mechanism and first two arguments of the method |
575 | | // implementation vary between runtimes. We can get the receiver and |
576 | | // arguments in generic code. |
577 | | |
578 | 12.3k | bool isDelegateInit = E->isDelegateInitCall(); |
579 | | |
580 | 12.3k | const ObjCMethodDecl *method = E->getMethodDecl(); |
581 | | |
582 | | // If the method is -retain, and the receiver's being loaded from |
583 | | // a __weak variable, peephole the entire operation to objc_loadWeakRetained. |
584 | 12.3k | if (method && E->getReceiverKind() == ObjCMessageExpr::Instance &&
585 | 12.3k | method->getMethodFamily() == OMF_retain) {
586 | 53 | if (auto lvalueExpr = findWeakLValue(E->getInstanceReceiver())) { |
587 | 8 | LValue lvalue = EmitLValue(lvalueExpr); |
588 | 8 | llvm::Value *result = EmitARCLoadWeakRetained(lvalue.getAddress(*this)); |
589 | 8 | return AdjustObjCObjectType(*this, E->getType(), RValue::get(result)); |
590 | 8 | } |
591 | 53 | } |
592 | | |
593 | 12.3k | if (Optional<llvm::Value *> Val = tryEmitSpecializedAllocInit(*this, E)) |
594 | 369 | return AdjustObjCObjectType(*this, E->getType(), RValue::get(*Val)); |
595 | | |
596 | | // We don't retain the receiver in delegate init calls, and this is |
597 | | // safe because the receiver value is always loaded from 'self', |
598 | | // which we zero out. We don't want to Block_copy block receivers, |
599 | | // though. |
600 | 11.9k | bool retainSelf = |
601 | 11.9k | (!isDelegateInit && |
602 | 11.9k | CGM.getLangOpts().ObjCAutoRefCount &&
603 | 11.9k | method &&
604 | 11.9k | method->hasAttr<NSConsumesSelfAttr>());
605 | | |
606 | 11.9k | CGObjCRuntime &Runtime = CGM.getObjCRuntime(); |
607 | 11.9k | bool isSuperMessage = false; |
608 | 11.9k | bool isClassMessage = false; |
609 | 11.9k | ObjCInterfaceDecl *OID = nullptr; |
610 | | // Find the receiver |
611 | 11.9k | QualType ReceiverType; |
612 | 11.9k | llvm::Value *Receiver = nullptr; |
613 | 11.9k | switch (E->getReceiverKind()) { |
614 | 7.30k | case ObjCMessageExpr::Instance: |
615 | 7.30k | ReceiverType = E->getInstanceReceiver()->getType(); |
616 | 7.30k | isClassMessage = ReceiverType->isObjCClassType(); |
617 | 7.30k | if (retainSelf) { |
618 | 11 | TryEmitResult ter = tryEmitARCRetainScalarExpr(*this, |
619 | 11 | E->getInstanceReceiver()); |
620 | 11 | Receiver = ter.getPointer(); |
621 | 11 | if (ter.getInt()) retainSelf = false; |
622 | 11 | } else |
623 | 7.29k | Receiver = EmitScalarExpr(E->getInstanceReceiver()); |
624 | 7.30k | break; |
625 | | |
626 | 4.14k | case ObjCMessageExpr::Class: { |
627 | 4.14k | ReceiverType = E->getClassReceiver(); |
628 | 4.14k | OID = ReceiverType->castAs<ObjCObjectType>()->getInterface(); |
629 | 4.14k | assert(OID && "Invalid Objective-C class message send"); |
630 | 0 | Receiver = Runtime.GetClass(*this, OID); |
631 | 4.14k | isClassMessage = true; |
632 | 4.14k | break; |
633 | 0 | } |
634 | | |
635 | 443 | case ObjCMessageExpr::SuperInstance: |
636 | 443 | ReceiverType = E->getSuperType(); |
637 | 443 | Receiver = LoadObjCSelf(); |
638 | 443 | isSuperMessage = true; |
639 | 443 | break; |
640 | | |
641 | 42 | case ObjCMessageExpr::SuperClass: |
642 | 42 | ReceiverType = E->getSuperType(); |
643 | 42 | Receiver = LoadObjCSelf(); |
644 | 42 | isSuperMessage = true; |
645 | 42 | isClassMessage = true; |
646 | 42 | break; |
647 | 11.9k | } |
648 | | |
649 | 11.9k | if (retainSelf) |
650 | 0 | Receiver = EmitARCRetainNonBlock(Receiver); |
651 | | |
652 | | // In ARC, we sometimes want to "extend the lifetime" |
653 | | // (i.e. retain+autorelease) of receivers of returns-inner-pointer |
654 | | // messages. |
655 | 11.9k | if (getLangOpts().ObjCAutoRefCount && method &&
656 | 11.9k | method->hasAttr<ObjCReturnsInnerPointerAttr>() &&
657 | 11.9k | shouldExtendReceiverForInnerPointerMessage(E))
658 | 6 | Receiver = EmitARCRetainAutorelease(ReceiverType, Receiver); |
659 | | |
660 | 11.9k | QualType ResultType = method ? method->getReturnType() : E->getType();
661 | | |
662 | 11.9k | CallArgList Args; |
663 | 11.9k | EmitCallArgs(Args, method, E->arguments(), /*AC*/AbstractCallee(method)); |
664 | | |
665 | | // For delegate init calls in ARC, do an unsafe store of null into |
666 | | // self. This represents the call taking direct ownership of that |
667 | | // value. We have to do this after emitting the other call |
668 | | // arguments because they might also reference self, but we don't |
669 | | // have to worry about any of them modifying self because that would |
670 | | // be an undefined read and write of an object in unordered |
671 | | // expressions. |
672 | 11.9k | if (isDelegateInit) { |
673 | 9 | assert(getLangOpts().ObjCAutoRefCount && |
674 | 9 | "delegate init calls should only be marked in ARC"); |
675 | | |
676 | | // Do an unsafe store of null into self. |
677 | 0 | Address selfAddr = |
678 | 9 | GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl()); |
679 | 9 | Builder.CreateStore(getNullForVariable(selfAddr), selfAddr); |
680 | 9 | } |
681 | | |
682 | 0 | RValue result; |
683 | 11.9k | if (isSuperMessage) { |
684 | | // super is only valid in an Objective-C method |
685 | 485 | const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl); |
686 | 485 | bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext()); |
687 | 485 | result = Runtime.GenerateMessageSendSuper(*this, Return, ResultType, |
688 | 485 | E->getSelector(), |
689 | 485 | OMD->getClassInterface(), |
690 | 485 | isCategoryImpl, |
691 | 485 | Receiver, |
692 | 485 | isClassMessage, |
693 | 485 | Args, |
694 | 485 | method); |
695 | 11.4k | } else { |
696 | | // Call runtime methods directly if we can. |
697 | 11.4k | result = Runtime.GeneratePossiblySpecializedMessageSend( |
698 | 11.4k | *this, Return, ResultType, E->getSelector(), Receiver, Args, OID, |
699 | 11.4k | method, isClassMessage); |
700 | 11.4k | } |
701 | | |
702 | | // For delegate init calls in ARC, implicitly store the result of |
703 | | // the call back into self. This takes ownership of the value. |
704 | 11.9k | if (isDelegateInit) { |
705 | 9 | Address selfAddr = |
706 | 9 | GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl()); |
707 | 9 | llvm::Value *newSelf = result.getScalarVal(); |
708 | | |
709 | | // The delegate return type isn't necessarily a matching type; in |
710 | | // fact, it's quite likely to be 'id'. |
711 | 9 | llvm::Type *selfTy = selfAddr.getElementType(); |
712 | 9 | newSelf = Builder.CreateBitCast(newSelf, selfTy); |
713 | | |
714 | 9 | Builder.CreateStore(newSelf, selfAddr); |
715 | 9 | } |
716 | | |
717 | 11.9k | return AdjustObjCObjectType(*this, E->getType(), result); |
718 | 11.9k | } |
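// The delegate-init handling above corresponds to ARC source like this
// sketch: before the send, null is stored into 'self' (the call takes
// ownership of the old value); afterwards the result is stored back,
// transferring ownership of the new value.
//
//   - (instancetype)init {
//     self = [super init];
//     if (self) { /* ... */ }
//     return self;
//   }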
719 | | |
720 | | namespace { |
721 | | struct FinishARCDealloc final : EHScopeStack::Cleanup { |
722 | 8 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
723 | 8 | const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CGF.CurCodeDecl); |
724 | | |
725 | 8 | const ObjCImplDecl *impl = cast<ObjCImplDecl>(method->getDeclContext()); |
726 | 8 | const ObjCInterfaceDecl *iface = impl->getClassInterface(); |
727 | 8 | if (!iface->getSuperClass()) return;
728 | | |
729 | 8 | bool isCategory = isa<ObjCCategoryImplDecl>(impl); |
730 | | |
731 | | // Call [super dealloc] if we have a superclass. |
732 | 8 | llvm::Value *self = CGF.LoadObjCSelf(); |
733 | | |
734 | 8 | CallArgList args; |
735 | 8 | CGF.CGM.getObjCRuntime().GenerateMessageSendSuper(CGF, ReturnValueSlot(), |
736 | 8 | CGF.getContext().VoidTy, |
737 | 8 | method->getSelector(), |
738 | 8 | iface, |
739 | 8 | isCategory, |
740 | 8 | self, |
741 | 8 | /*is class msg*/ false, |
742 | 8 | args, |
743 | 8 | method); |
744 | 8 | } |
745 | | }; |
746 | | } |
747 | | |
748 | | /// StartObjCMethod - Begin emission of an ObjCMethod. This generates |
749 | | /// the LLVM function and sets the other context used by |
750 | | /// CodeGenFunction. |
751 | | void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD, |
752 | 2.58k | const ObjCContainerDecl *CD) { |
753 | 2.58k | SourceLocation StartLoc = OMD->getBeginLoc(); |
754 | 2.58k | FunctionArgList args; |
755 | | // Check if we should generate debug info for this method. |
756 | 2.58k | if (OMD->hasAttr<NoDebugAttr>()) |
757 | 0 | DebugInfo = nullptr; // disable debug info indefinitely for this function |
758 | | |
759 | 2.58k | llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD); |
760 | | |
761 | 2.58k | const CGFunctionInfo &FI = CGM.getTypes().arrangeObjCMethodDeclaration(OMD); |
762 | 2.58k | if (OMD->isDirectMethod()) { |
763 | 40 | Fn->setVisibility(llvm::Function::HiddenVisibility); |
764 | 40 | CGM.SetLLVMFunctionAttributes(OMD, FI, Fn, /*IsThunk=*/false); |
765 | 40 | CGM.SetLLVMFunctionAttributesForDefinition(OMD, Fn); |
766 | 2.54k | } else { |
767 | 2.54k | CGM.SetInternalFunctionAttributes(OMD, Fn, FI); |
768 | 2.54k | } |
769 | | |
770 | 2.58k | args.push_back(OMD->getSelfDecl()); |
771 | 2.58k | args.push_back(OMD->getCmdDecl()); |
772 | | |
773 | 2.58k | args.append(OMD->param_begin(), OMD->param_end()); |
774 | | |
775 | 2.58k | CurGD = OMD; |
776 | 2.58k | CurEHLocation = OMD->getEndLoc(); |
777 | | |
778 | 2.58k | StartFunction(OMD, OMD->getReturnType(), Fn, FI, args, |
779 | 2.58k | OMD->getLocation(), StartLoc); |
780 | | |
781 | 2.58k | if (OMD->isDirectMethod()) { |
782 | | // This function is a direct call, it has to implement a nil check |
783 | | // on entry. |
784 | | // |
785 | | // TODO: possibly have several entry points to elide the check |
786 | 40 | CGM.getObjCRuntime().GenerateDirectMethodPrologue(*this, Fn, OMD, CD); |
787 | 40 | } |
788 | | |
789 | | // In ARC, certain methods get an extra cleanup. |
790 | 2.58k | if (CGM.getLangOpts().ObjCAutoRefCount && |
791 | 2.58k | OMD->isInstanceMethod() &&
792 | 2.58k | OMD->getSelector().isUnarySelector()) {
793 | 194 | const IdentifierInfo *ident = |
794 | 194 | OMD->getSelector().getIdentifierInfoForSlot(0); |
795 | 194 | if (ident->isStr("dealloc")) |
796 | 8 | EHStack.pushCleanup<FinishARCDealloc>(getARCCleanupKind()); |
797 | 194 | } |
798 | 2.58k | } |
799 | | |
800 | | static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF, |
801 | | LValue lvalue, QualType type); |
802 | | |
803 | | /// Generate an Objective-C method. An Objective-C method is a C function with |
804 | | /// its pointer, name, and types registered in the class structure. |
805 | 1.59k | void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) { |
806 | 1.59k | StartObjCMethod(OMD, OMD->getClassInterface()); |
807 | 1.59k | PGO.assignRegionCounters(GlobalDecl(OMD), CurFn); |
808 | 1.59k | assert(isa<CompoundStmt>(OMD->getBody())); |
809 | 0 | incrementProfileCounter(OMD->getBody()); |
810 | 1.59k | EmitCompoundStmtWithoutScope(*cast<CompoundStmt>(OMD->getBody())); |
811 | 1.59k | FinishFunction(OMD->getBodyRBrace()); |
812 | 1.59k | } |
813 | | |
814 | | /// emitStructGetterCall - Call the runtime function to load a property |
815 | | /// into the return value slot. |
816 | | static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar, |
817 | 24 | bool isAtomic, bool hasStrong) { |
818 | 24 | ASTContext &Context = CGF.getContext(); |
819 | | |
820 | 24 | llvm::Value *src = |
821 | 24 | CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0) |
822 | 24 | .getPointer(CGF); |
823 | | |
824 | | // objc_copyStruct (ReturnValue, &structIvar, |
825 | | // sizeof (Type of Ivar), isAtomic, false); |
826 | 24 | CallArgList args; |
827 | | |
828 | 24 | llvm::Value *dest = |
829 | 24 | CGF.Builder.CreateBitCast(CGF.ReturnValue.getPointer(), CGF.VoidPtrTy); |
830 | 24 | args.add(RValue::get(dest), Context.VoidPtrTy); |
831 | | |
832 | 24 | src = CGF.Builder.CreateBitCast(src, CGF.VoidPtrTy); |
833 | 24 | args.add(RValue::get(src), Context.VoidPtrTy); |
834 | | |
835 | 24 | CharUnits size = CGF.getContext().getTypeSizeInChars(ivar->getType()); |
836 | 24 | args.add(RValue::get(CGF.CGM.getSize(size)), Context.getSizeType()); |
837 | 24 | args.add(RValue::get(CGF.Builder.getInt1(isAtomic)), Context.BoolTy); |
838 | 24 | args.add(RValue::get(CGF.Builder.getInt1(hasStrong)), Context.BoolTy); |
839 | | |
840 | 24 | llvm::FunctionCallee fn = CGF.CGM.getObjCRuntime().GetGetStructFunction(); |
841 | 24 | CGCallee callee = CGCallee::forDirect(fn); |
842 | 24 | CGF.EmitCall(CGF.getTypes().arrangeBuiltinFunctionCall(Context.VoidTy, args), |
843 | 24 | callee, ReturnValueSlot(), args); |
844 | 24 | } |
845 | | |
846 | | /// Determine whether the given architecture supports unaligned atomic |
847 | | /// accesses. They don't have to be fast, just faster than a function |
848 | | /// call and a mutex. |
849 | 31 | static bool hasUnalignedAtomics(llvm::Triple::ArchType arch) { |
850 | | // FIXME: Allow unaligned atomic load/store on x86. (It is not |
851 | | // currently supported by the backend.) |
852 | 31 | return false; |
853 | 31 | } |
854 | | |
855 | | /// Return the maximum size that permits atomic accesses for the given |
856 | | /// architecture. |
857 | | static CharUnits getMaxAtomicAccessSize(CodeGenModule &CGM, |
858 | 416 | llvm::Triple::ArchType arch) { |
859 | | // ARM has 8-byte atomic accesses, but it's not clear whether we |
860 | | // want to rely on them here. |
861 | | |
862 | | // In the default case, just assume that any size up to a pointer is |
863 | | // fine given adequate alignment. |
864 | 416 | return CharUnits::fromQuantity(CGM.PointerSizeInBytes); |
865 | 416 | } |
866 | | |
867 | | namespace { |
868 | | class PropertyImplStrategy { |
869 | | public: |
870 | | enum StrategyKind { |
871 | | /// The 'native' strategy is to use the architecture's provided |
872 | | /// reads and writes. |
873 | | Native, |
874 | | |
875 | | /// Use objc_setProperty and objc_getProperty. |
876 | | GetSetProperty, |
877 | | |
878 | | /// Use objc_setProperty for the setter, but use expression |
879 | | /// evaluation for the getter. |
880 | | SetPropertyAndExpressionGet, |
881 | | |
882 | | /// Use objc_copyStruct. |
883 | | CopyStruct, |
884 | | |
885 | | /// The 'expression' strategy is to emit normal assignment or |
886 | | /// lvalue-to-rvalue expressions. |
887 | | Expression |
888 | | }; |
889 | | |
890 | 872 | StrategyKind getKind() const { return StrategyKind(Kind); } |
891 | | |
892 | 24 | bool hasStrongMember() const { return HasStrong; } |
893 | 281 | bool isAtomic() const { return IsAtomic; } |
894 | 157 | bool isCopy() const { return IsCopy; } |
895 | | |
896 | 818 | CharUnits getIvarSize() const { return IvarSize; } |
897 | 0 | CharUnits getIvarAlignment() const { return IvarAlignment; } |
898 | | |
899 | | PropertyImplStrategy(CodeGenModule &CGM, |
900 | | const ObjCPropertyImplDecl *propImpl); |
901 | | |
902 | | private: |
903 | | unsigned Kind : 8; |
904 | | unsigned IsAtomic : 1; |
905 | | unsigned IsCopy : 1; |
906 | | unsigned HasStrong : 1; |
907 | | |
908 | | CharUnits IvarSize; |
909 | | CharUnits IvarAlignment; |
910 | | }; |
911 | | } |
912 | | |
913 | | /// Pick an implementation strategy for the given property synthesis. |
914 | | PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM, |
915 | 872 | const ObjCPropertyImplDecl *propImpl) { |
916 | 872 | const ObjCPropertyDecl *prop = propImpl->getPropertyDecl(); |
917 | 872 | ObjCPropertyDecl::SetterKind setterKind = prop->getSetterKind(); |
918 | | |
919 | 872 | IsCopy = (setterKind == ObjCPropertyDecl::Copy); |
920 | 872 | IsAtomic = prop->isAtomic(); |
921 | 872 | HasStrong = false; // doesn't matter here. |
922 | | |
923 | | // Evaluate the ivar's size and alignment. |
924 | 872 | ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl(); |
925 | 872 | QualType ivarType = ivar->getType(); |
926 | 872 | auto TInfo = CGM.getContext().getTypeInfoInChars(ivarType); |
927 | 872 | IvarSize = TInfo.Width; |
928 | 872 | IvarAlignment = TInfo.Align; |
929 | | |
930 | | // If we have a copy property, we always have to use setProperty. |
931 | | // If the property is atomic we need to use getProperty, but in |
932 | | // the nonatomic case we can just use expression. |
933 | 872 | if (IsCopy) { |
934 | 80 | Kind = IsAtomic ? GetSetProperty : SetPropertyAndExpressionGet;
935 | 80 | return; |
936 | 80 | } |
937 | | |
938 | | // Handle retain. |
939 | 792 | if (setterKind == ObjCPropertyDecl::Retain) { |
940 | | // In GC-only, there's nothing special that needs to be done. |
941 | 246 | if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) { |
942 | | // fallthrough |
943 | | |
944 | | // In ARC, if the property is non-atomic, use expression emission, |
945 | | // which translates to objc_storeStrong. This isn't required, but |
946 | | // it's slightly nicer. |
947 | 242 | } else if (CGM.getLangOpts().ObjCAutoRefCount && !IsAtomic) {
948 | | // Using standard expression emission for the setter is only |
949 | | // acceptable if the ivar is __strong, which won't be true if |
950 | | // the property is annotated with __attribute__((NSObject)). |
951 | | // TODO: falling all the way back to objc_setProperty here is |
952 | | // just laziness, though; we could still use objc_storeStrong |
953 | | // if we hacked it right. |
954 | 10 | if (ivarType.getObjCLifetime() == Qualifiers::OCL_Strong) |
955 | 8 | Kind = Expression; |
956 | 2 | else |
957 | 2 | Kind = SetPropertyAndExpressionGet; |
958 | 10 | return; |
959 | | |
960 | | // Otherwise, we need to at least use setProperty. However, if |
961 | | // the property isn't atomic, we can use normal expression |
962 | | // emission for the getter. |
963 | 232 | } else if (!IsAtomic) { |
964 | 92 | Kind = SetPropertyAndExpressionGet; |
965 | 92 | return; |
966 | | |
967 | | // Otherwise, we have to use both setProperty and getProperty. |
968 | 140 | } else { |
969 | 140 | Kind = GetSetProperty; |
970 | 140 | return; |
971 | 140 | } |
972 | 246 | } |
973 | | |
974 | | // If we're not atomic, just use expression accesses. |
975 | 550 | if (!IsAtomic) { |
976 | 57 | Kind = Expression; |
977 | 57 | return; |
978 | 57 | } |
979 | | |
980 | | // Properties on bitfield ivars need to be emitted using expression |
981 | | // accesses even if they're nominally atomic. |
982 | 493 | if (ivar->isBitField()) { |
983 | 2 | Kind = Expression; |
984 | 2 | return; |
985 | 2 | } |
986 | | |
987 | | // GC-qualified or ARC-qualified ivars need to be emitted as |
988 | | // expressions. This actually works out to being atomic anyway, |
989 | | // except for ARC __strong, but that should trigger the above code. |
990 | 491 | if (ivarType.hasNonTrivialObjCLifetime() || |
991 | 491 | (CGM.getLangOpts().getGC() &&
992 | 489 | CGM.getContext().getObjCGCAttrKind(ivarType))) {
993 | 32 | Kind = Expression; |
994 | 32 | return; |
995 | 32 | } |
996 | | |
997 | | // Compute whether the ivar has strong members. |
998 | 459 | if (CGM.getLangOpts().getGC()) |
999 | 30 | if (const RecordType *recordType = ivarType->getAs<RecordType>()) |
1000 | 14 | HasStrong = recordType->getDecl()->hasObjectMember(); |
1001 | | |
1002 | | // We can never access structs with object members with a native |
1003 | | // access, because we need to use write barriers. This is what |
1004 | | // objc_copyStruct is for. |
1005 | 459 | if (HasStrong) { |
1006 | 6 | Kind = CopyStruct; |
1007 | 6 | return; |
1008 | 6 | } |
1009 | | |
1010 | | // Otherwise, this is target-dependent and based on the size and |
1011 | | // alignment of the ivar. |
1012 | | |
1013 | | // If the size of the ivar is not a power of two, give up. We don't |
1014 | | // want to get into the business of doing compare-and-swaps. |
1015 | 453 | if (!IvarSize.isPowerOfTwo()) { |
1016 | 6 | Kind = CopyStruct; |
1017 | 6 | return; |
1018 | 6 | } |
1019 | | |
1020 | 447 | llvm::Triple::ArchType arch = |
1021 | 447 | CGM.getTarget().getTriple().getArch(); |
1022 | | |
1023 | | // Most architectures require memory to fit within a single cache |
1024 | | // line, so the alignment has to be at least the size of the access. |
1025 | | // Otherwise we have to grab a lock. |
1026 | 447 | if (IvarAlignment < IvarSize && !hasUnalignedAtomics(arch)) {
1027 | 31 | Kind = CopyStruct; |
1028 | 31 | return; |
1029 | 31 | } |
1030 | | |
1031 | | // If the ivar's size exceeds the architecture's maximum atomic |
1032 | | // access size, we have to use CopyStruct. |
1033 | 416 | if (IvarSize > getMaxAtomicAccessSize(CGM, arch)) { |
1034 | 6 | Kind = CopyStruct; |
1035 | 6 | return; |
1036 | 6 | } |
1037 | | |
1038 | | // Otherwise, we can use native loads and stores. |
1039 | 410 | Kind = Native; |
1040 | 410 | } |
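// Rough mapping implied by the logic above (sketch; 'BigStruct' is
// hypothetical, and the exact choice also depends on ivar size, alignment,
// bitfields, and GC/ARC mode):
//
//   @property (copy) NSString *a;            // GetSetProperty (atomic default)
//   @property (nonatomic, copy) NSString *b; // SetPropertyAndExpressionGet
//   @property (nonatomic, strong) id c;      // Expression (ARC __strong ivar)
//   @property (strong) id d;                 // GetSetProperty (atomic retain)
//   @property (atomic) double e;             // Native, if it fits in a pointer
//   @property (atomic) BigStruct f;          // CopyStruct (objc_copyStruct)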
1041 | | |
1042 | | /// Generate an Objective-C property getter function. |
1043 | | /// |
1044 | | /// The given Decl must be an ObjCImplementationDecl. \@synthesize |
1045 | | /// is illegal within a category. |
1046 | | void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP, |
1047 | 453 | const ObjCPropertyImplDecl *PID) { |
1048 | 453 | llvm::Constant *AtomicHelperFn = |
1049 | 453 | CodeGenFunction(CGM).GenerateObjCAtomicGetterCopyHelperFunction(PID); |
1050 | 453 | ObjCMethodDecl *OMD = PID->getGetterMethodDecl(); |
1051 | 453 | assert(OMD && "Invalid call to generate getter (empty method)"); |
1052 | 0 | StartObjCMethod(OMD, IMP->getClassInterface()); |
1053 | | |
1054 | 453 | generateObjCGetterBody(IMP, PID, OMD, AtomicHelperFn); |
1055 | | |
1056 | 453 | FinishFunction(OMD->getEndLoc()); |
1057 | 453 | } |
1058 | | |
1059 | 467 | static bool hasTrivialGetExpr(const ObjCPropertyImplDecl *propImpl) { |
1060 | 467 | const Expr *getter = propImpl->getGetterCXXConstructor(); |
1061 | 467 | if (!getter) return true;
1062 | | |
1063 | | // Sema only makes one of these when the ivar has a C++ class type,
1064 | | // so the form is pretty constrained. |
1065 | | |
1066 | | // If the property has a reference type, we might just be binding a |
1067 | | // reference, in which case the result will be a gl-value. We should |
1068 | | // treat this as a non-trivial operation. |
1069 | 35 | if (getter->isGLValue()) |
1070 | 2 | return false; |
1071 | | |
1072 | | // If we selected a trivial copy-constructor, we're okay. |
1073 | 33 | if (const CXXConstructExpr *construct = dyn_cast<CXXConstructExpr>(getter)) |
1074 | 33 | return (construct->getConstructor()->isTrivial()); |
1075 | | |
1076 | | // The constructor might require cleanups (in which case it's never |
1077 | | // trivial). |
1078 | 0 | assert(isa<ExprWithCleanups>(getter)); |
1079 | 0 | return false; |
1080 | 33 | } |
1081 | | |
1082 | | /// emitCPPObjectAtomicGetterCall - Call the runtime function to |
1083 | | /// copy the ivar into the return slot.
1084 | | static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF, |
1085 | | llvm::Value *returnAddr, |
1086 | | ObjCIvarDecl *ivar, |
1087 | 6 | llvm::Constant *AtomicHelperFn) { |
1088 | | // objc_copyCppObjectAtomic (&returnSlot, &CppObjectIvar, |
1089 | | // AtomicHelperFn); |
1090 | 6 | CallArgList args; |
1091 | | |
1092 | | // The 1st argument is the return Slot. |
1093 | 6 | args.add(RValue::get(returnAddr), CGF.getContext().VoidPtrTy); |
1094 | | |
1095 | | // The 2nd argument is the address of the ivar. |
1096 | 6 | llvm::Value *ivarAddr = |
1097 | 6 | CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0) |
1098 | 6 | .getPointer(CGF); |
1099 | 6 | ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy); |
1100 | 6 | args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy); |
1101 | | |
1102 | | // Third argument is the helper function. |
1103 | 6 | args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy); |
1104 | | |
1105 | 6 | llvm::FunctionCallee copyCppAtomicObjectFn = |
1106 | 6 | CGF.CGM.getObjCRuntime().GetCppAtomicObjectGetFunction(); |
1107 | 6 | CGCallee callee = CGCallee::forDirect(copyCppAtomicObjectFn); |
1108 | 6 | CGF.EmitCall( |
1109 | 6 | CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args), |
1110 | 6 | callee, ReturnValueSlot(), args); |
1111 | 6 | } |
1112 | | |
1113 | | void |
1114 | | CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl, |
1115 | | const ObjCPropertyImplDecl *propImpl, |
1116 | | const ObjCMethodDecl *GetterMethodDecl, |
1117 | 453 | llvm::Constant *AtomicHelperFn) { |
1118 | | // If there's a non-trivial 'get' expression, we just have to emit that. |
1119 | 453 | if (!hasTrivialGetExpr(propImpl)) { |
1120 | 11 | if (!AtomicHelperFn) { |
1121 | 5 | auto *ret = ReturnStmt::Create(getContext(), SourceLocation(), |
1122 | 5 | propImpl->getGetterCXXConstructor(), |
1123 | 5 | /* NRVOCandidate=*/nullptr); |
1124 | 5 | EmitReturnStmt(*ret); |
1125 | 5 | } |
1126 | 6 | else { |
1127 | 6 | ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl(); |
1128 | 6 | emitCPPObjectAtomicGetterCall(*this, ReturnValue.getPointer(), |
1129 | 6 | ivar, AtomicHelperFn); |
1130 | 6 | } |
1131 | 11 | return; |
1132 | 11 | } |
1133 | | |
1134 | 442 | const ObjCPropertyDecl *prop = propImpl->getPropertyDecl(); |
1135 | 442 | QualType propType = prop->getType(); |
1136 | 442 | ObjCMethodDecl *getterMethod = propImpl->getGetterMethodDecl(); |
1137 | | |
1138 | 442 | ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl(); |
1139 | | |
1140 | | // Pick an implementation strategy. |
1141 | 442 | PropertyImplStrategy strategy(CGM, propImpl); |
1142 | 442 | switch (strategy.getKind()) { |
1143 | 211 | case PropertyImplStrategy::Native: { |
1144 | | // We don't need to do anything for a zero-size struct. |
1145 | 211 | if (strategy.getIvarSize().isZero()) |
1146 | 1 | return; |
1147 | | |
1148 | 210 | LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0); |
1149 | | |
1150 | | // Currently, all atomic accesses have to be through integer |
1151 | | // types, so there's no point in trying to pick a prettier type. |
1152 | 210 | uint64_t ivarSize = getContext().toBits(strategy.getIvarSize()); |
1153 | 210 | llvm::Type *bitcastType = llvm::Type::getIntNTy(getLLVMContext(), ivarSize); |
1154 | | |
1155 | | // Perform an atomic load. This does not impose ordering constraints. |
1156 | 210 | Address ivarAddr = LV.getAddress(*this); |
1157 | 210 | ivarAddr = Builder.CreateElementBitCast(ivarAddr, bitcastType); |
1158 | 210 | llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load"); |
1159 | 210 | load->setAtomic(llvm::AtomicOrdering::Unordered); |
1160 | | |
1161 | | // Store that value into the return address. Doing this with a |
1162 | | // bitcast is likely to produce some pretty ugly IR, but it's not |
1163 | | // the *most* terrible thing in the world. |
1164 | 210 | llvm::Type *retTy = ConvertType(getterMethod->getReturnType()); |
1165 | 210 | uint64_t retTySize = CGM.getDataLayout().getTypeSizeInBits(retTy); |
1166 | 210 | llvm::Value *ivarVal = load; |
1167 | 210 | if (ivarSize > retTySize) { |
1168 | 1 | bitcastType = llvm::Type::getIntNTy(getLLVMContext(), retTySize); |
1169 | 1 | ivarVal = Builder.CreateTrunc(load, bitcastType); |
1170 | 1 | } |
1171 | 210 | Builder.CreateStore(ivarVal, |
1172 | 210 | Builder.CreateElementBitCast(ReturnValue, bitcastType)); |
1173 | | |
1174 | | // Make sure we don't do an autorelease. |
1175 | 210 | AutoreleaseResult = false; |
1176 | 210 | return; |
1177 | 211 | } |
1178 | | |
1179 | 100 | case PropertyImplStrategy::GetSetProperty: { |
1180 | 100 | llvm::FunctionCallee getPropertyFn = |
1181 | 100 | CGM.getObjCRuntime().GetPropertyGetFunction(); |
1182 | 100 | if (!getPropertyFn) { |
1183 | 0 | CGM.ErrorUnsupported(propImpl, "Obj-C getter requiring atomic copy"); |
1184 | 0 | return; |
1185 | 0 | } |
1186 | 100 | CGCallee callee = CGCallee::forDirect(getPropertyFn); |
1187 | | |
1188 | | // Return (ivar-type) objc_getProperty((id) self, _cmd, offset, true). |
1189 | | // FIXME: Can't this be simpler? This might even be worse than the |
1190 | | // corresponding gcc code. |
1191 | 100 | llvm::Value *cmd = |
1192 | 100 | Builder.CreateLoad(GetAddrOfLocalVar(getterMethod->getCmdDecl()), "cmd"); |
1193 | 100 | llvm::Value *self = Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy); |
1194 | 100 | llvm::Value *ivarOffset = |
1195 | 100 | EmitIvarOffset(classImpl->getClassInterface(), ivar); |
1196 | | |
1197 | 100 | CallArgList args; |
1198 | 100 | args.add(RValue::get(self), getContext().getObjCIdType()); |
1199 | 100 | args.add(RValue::get(cmd), getContext().getObjCSelType()); |
1200 | 100 | args.add(RValue::get(ivarOffset), getContext().getPointerDiffType()); |
1201 | 100 | args.add(RValue::get(Builder.getInt1(strategy.isAtomic())), |
1202 | 100 | getContext().BoolTy); |
1203 | | |
1204 | | // FIXME: We shouldn't need to get the function info here, the |
1205 | | // runtime already should have computed it to build the function. |
1206 | 100 | llvm::CallBase *CallInstruction; |
1207 | 100 | RValue RV = EmitCall(getTypes().arrangeBuiltinFunctionCall( |
1208 | 100 | getContext().getObjCIdType(), args), |
1209 | 100 | callee, ReturnValueSlot(), args, &CallInstruction); |
1210 | 100 | if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(CallInstruction)) |
1211 | 100 | call->setTailCall(); |
1212 | | |
1213 | | // We need to fix the type here. Ivars with copy & retain are |
1214 | | // always objects so we don't need to worry about complex or |
1215 | | // aggregates. |
1216 | 100 | RV = RValue::get(Builder.CreateBitCast( |
1217 | 100 | RV.getScalarVal(), |
1218 | 100 | getTypes().ConvertType(getterMethod->getReturnType()))); |
1219 | | |
1220 | 100 | EmitReturnOfRValue(RV, propType); |
1221 | | |
1222 | | // objc_getProperty does an autorelease, so we should suppress ours. |
1223 | 100 | AutoreleaseResult = false; |
1224 | | |
1225 | 100 | return; |
1226 | 100 | } |
1227 | | |
1228 | 24 | case PropertyImplStrategy::CopyStruct: |
1229 | 24 | emitStructGetterCall(*this, ivar, strategy.isAtomic(), |
1230 | 24 | strategy.hasStrongMember()); |
1231 | 24 | return; |
1232 | | |
1233 | 50 | case PropertyImplStrategy::Expression: |
1234 | 107 | case PropertyImplStrategy::SetPropertyAndExpressionGet: { |
1235 | 107 | LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0); |
1236 | | |
1237 | 107 | QualType ivarType = ivar->getType(); |
1238 | 107 | switch (getEvaluationKind(ivarType)) { |
1239 | 0 | case TEK_Complex: { |
1240 | 0 | ComplexPairTy pair = EmitLoadOfComplex(LV, SourceLocation()); |
1241 | 0 | EmitStoreOfComplex(pair, MakeAddrLValue(ReturnValue, ivarType), |
1242 | 0 | /*init*/ true); |
1243 | 0 | return; |
1244 | 0 | } |
1245 | 4 | case TEK_Aggregate: { |
1246 | | // The return value slot is guaranteed to not be aliased, but |
1247 | | // that's not necessarily the same as "on the stack", so |
1248 | | // we still potentially need objc_memmove_collectable. |
1249 | 4 | EmitAggregateCopy(/* Dest= */ MakeAddrLValue(ReturnValue, ivarType), |
1250 | 4 | /* Src= */ LV, ivarType, getOverlapForReturnValue()); |
1251 | 4 | return; |
1252 | 0 | } |
1253 | 103 | case TEK_Scalar: { |
1254 | 103 | llvm::Value *value; |
1255 | 103 | if (propType->isReferenceType()) { |
1256 | 0 | value = LV.getAddress(*this).getPointer(); |
1257 | 103 | } else { |
1258 | | // We want to load and autoreleaseReturnValue ARC __weak ivars. |
1259 | 103 | if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) { |
1260 | 7 | if (getLangOpts().ObjCAutoRefCount) { |
1261 | 6 | value = emitARCRetainLoadOfScalar(*this, LV, ivarType); |
1262 | 6 | } else { |
1263 | 1 | value = EmitARCLoadWeak(LV.getAddress(*this)); |
1264 | 1 | } |
1265 | | |
1266 | | // Otherwise we want to do a simple load, suppressing the |
1267 | | // final autorelease. |
1268 | 96 | } else { |
1269 | 96 | value = EmitLoadOfLValue(LV, SourceLocation()).getScalarVal(); |
1270 | 96 | AutoreleaseResult = false; |
1271 | 96 | } |
1272 | | |
1273 | 103 | value = Builder.CreateBitCast( |
1274 | 103 | value, ConvertType(GetterMethodDecl->getReturnType())); |
1275 | 103 | } |
1276 | | |
1277 | 103 | EmitReturnOfRValue(RValue::get(value), propType); |
1278 | 103 | return; |
1279 | 0 | } |
1280 | 107 | } |
1281 | 0 | llvm_unreachable("bad evaluation kind"); |
1282 | 0 | } |
1283 | | |
1284 | 442 | } |
1285 | 0 | llvm_unreachable("bad @property implementation strategy!"); |
1286 | 0 | } |
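 | |
 | | // For illustration, a sketch of the Native getter path above for a
 | | // hypothetical atomic property (the property name is invented):
 | | //
 | | //   @property double value;   // atomic by default
 | | //
 | | // lowers to an unordered atomic load of the ivar at an integer type of
 | | // the ivar's width, stored straight into the return slot:
 | | //
 | | //   %ld = load atomic i64, i64* %ivar unordered, align 8
 | | //   store i64 %ld, i64* %retval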
1287 | | |
1288 | | /// emitStructSetterCall - Call the runtime function to store the value |
1289 | | /// from the first formal parameter into the given ivar. |
1290 | | static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD, |
1291 | 25 | ObjCIvarDecl *ivar) { |
1292 | | // objc_copyStruct (&structIvar, &Arg, |
1293 | | // sizeof (struct something), true, false); |
1294 | 25 | CallArgList args; |
1295 | | |
1296 | | // The first argument is the address of the ivar. |
1297 | 25 | llvm::Value *ivarAddr = |
1298 | 25 | CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0) |
1299 | 25 | .getPointer(CGF); |
1300 | 25 | ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy); |
1301 | 25 | args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy); |
1302 | | |
1303 | | // The second argument is the address of the parameter variable. |
1304 | 25 | ParmVarDecl *argVar = *OMD->param_begin(); |
1305 | 25 | DeclRefExpr argRef(CGF.getContext(), argVar, false, |
1306 | 25 | argVar->getType().getNonReferenceType(), VK_LValue, |
1307 | 25 | SourceLocation()); |
1308 | 25 | llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer(CGF); |
1309 | 25 | argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy); |
1310 | 25 | args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy); |
1311 | | |
1312 | | // The third argument is the sizeof the type. |
1313 | 25 | llvm::Value *size = |
1314 | 25 | CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(ivar->getType())); |
1315 | 25 | args.add(RValue::get(size), CGF.getContext().getSizeType()); |
1316 | | |
1317 | | // The fourth argument is the 'isAtomic' flag. |
1318 | 25 | args.add(RValue::get(CGF.Builder.getTrue()), CGF.getContext().BoolTy); |
1319 | | |
1320 | | // The fifth argument is the 'hasStrong' flag. |
1321 | | // FIXME: should this really always be false? |
1322 | 25 | args.add(RValue::get(CGF.Builder.getFalse()), CGF.getContext().BoolTy); |
1323 | | |
1324 | 25 | llvm::FunctionCallee fn = CGF.CGM.getObjCRuntime().GetSetStructFunction(); |
1325 | 25 | CGCallee callee = CGCallee::forDirect(fn); |
1326 | 25 | CGF.EmitCall( |
1327 | 25 | CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args), |
1328 | 25 | callee, ReturnValueSlot(), args); |
1329 | 25 | } |
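 | |
 | | // For illustration: for a hypothetical aggregate property
 | | //   @property CGRect frame;   // atomic by default
 | | // the call built above amounts to
 | | //   objc_copyStruct(&self->_frame, &frame, sizeof(CGRect),
 | | //                   /*isAtomic*/ true, /*hasStrong*/ false);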
1330 | | |
1331 | | /// emitCPPObjectAtomicSetterCall - Call the runtime function to store |
1332 | | /// the value from the first formal parameter into the given ivar, using |
1333 | | /// the Cpp API for atomic Cpp objects with non-trivial copy assignment. |
1334 | | static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF, |
1335 | | ObjCMethodDecl *OMD, |
1336 | | ObjCIvarDecl *ivar, |
1337 | 7 | llvm::Constant *AtomicHelperFn) { |
1338 | | // objc_copyCppObjectAtomic (&CppObjectIvar, &Arg, |
1339 | | // AtomicHelperFn); |
1340 | 7 | CallArgList args; |
1341 | | |
1342 | | // The first argument is the address of the ivar. |
1343 | 7 | llvm::Value *ivarAddr = |
1344 | 7 | CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0) |
1345 | 7 | .getPointer(CGF); |
1346 | 7 | ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy); |
1347 | 7 | args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy); |
1348 | | |
1349 | | // The second argument is the address of the parameter variable. |
1350 | 7 | ParmVarDecl *argVar = *OMD->param_begin(); |
1351 | 7 | DeclRefExpr argRef(CGF.getContext(), argVar, false, |
1352 | 7 | argVar->getType().getNonReferenceType(), VK_LValue, |
1353 | 7 | SourceLocation()); |
1354 | 7 | llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer(CGF); |
1355 | 7 | argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy); |
1356 | 7 | args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy); |
1357 | | |
1358 | | // Third argument is the helper function. |
1359 | 7 | args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy); |
1360 | | |
1361 | 7 | llvm::FunctionCallee fn = |
1362 | 7 | CGF.CGM.getObjCRuntime().GetCppAtomicObjectSetFunction(); |
1363 | 7 | CGCallee callee = CGCallee::forDirect(fn); |
1364 | 7 | CGF.EmitCall( |
1365 | 7 | CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args), |
1366 | 7 | callee, ReturnValueSlot(), args); |
1367 | 7 | } |
1368 | | |
1369 | | |
1370 | 457 | static bool hasTrivialSetExpr(const ObjCPropertyImplDecl *PID) { |
1371 | 457 | Expr *setter = PID->getSetterCXXAssignment(); |
1372 | 457 | if (!setter) return true;
1373 | | |
1374 | | // Sema only makes one of these when the ivar has a C++ class type,
1375 | | // so the form is pretty constrained.
1376 | | |
1377 | | // An operator call is trivial if the function it calls is trivial. |
1378 | | // This also implies that there's nothing non-trivial going on with |
1379 | | // the arguments, because operator= can only be trivial if it's a |
1380 | | // synthesized assignment operator and therefore both parameters are |
1381 | | // references. |
1382 | 37 | if (CallExpr *call = dyn_cast<CallExpr>(setter)) { |
1383 | 37 | if (const FunctionDecl *callee |
1384 | 37 | = dyn_cast_or_null<FunctionDecl>(call->getCalleeDecl())) |
1385 | 37 | if (callee->isTrivial()) |
1386 | 18 | return true; |
1387 | 19 | return false; |
1388 | 37 | } |
1389 | | |
1390 | 0 | assert(isa<ExprWithCleanups>(setter)); |
1391 | 0 | return false; |
1392 | 37 | } |
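 | |
 | | // For illustration: an ivar whose type has a user-provided operator=
 | | // (say, a std::string ivar) yields a non-trivial setter expression here,
 | | // so generateObjCSetterBody below either emits the assignment directly
 | | // (nonatomic) or routes it through objc_copyCppObjectAtomic plus the
 | | // atomic helper function (atomic).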
1393 | | |
1394 | 157 | static bool UseOptimizedSetter(CodeGenModule &CGM) { |
1395 | 157 | if (CGM.getLangOpts().getGC() != LangOptions::NonGC) |
1396 | 0 | return false; |
1397 | 157 | return CGM.getLangOpts().ObjCRuntime.hasOptimizedSetter(); |
1398 | 157 | } |
1399 | | |
1400 | | void |
1401 | | CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl, |
1402 | | const ObjCPropertyImplDecl *propImpl, |
1403 | 442 | llvm::Constant *AtomicHelperFn) { |
1404 | 442 | ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl(); |
1405 | 442 | ObjCMethodDecl *setterMethod = propImpl->getSetterMethodDecl(); |
1406 | | |
1407 | | // Just use the setter expression if Sema gave us one and it's |
1408 | | // non-trivial. |
1409 | 442 | if (!hasTrivialSetExpr(propImpl)) { |
1410 | 12 | if (!AtomicHelperFn) |
1411 | | // If non-atomic, assignment is called directly. |
1412 | 5 | EmitStmt(propImpl->getSetterCXXAssignment()); |
1413 | 7 | else |
1414 | | // If atomic, assignment is called via a locking api. |
1415 | 7 | emitCPPObjectAtomicSetterCall(*this, setterMethod, ivar, |
1416 | 7 | AtomicHelperFn); |
1417 | 12 | return; |
1418 | 12 | } |
1419 | | |
1420 | 430 | PropertyImplStrategy strategy(CGM, propImpl); |
1421 | 430 | switch (strategy.getKind()) { |
1422 | 199 | case PropertyImplStrategy::Native: { |
1423 | | // We don't need to do anything for a zero-size struct. |
1424 | 199 | if (strategy.getIvarSize().isZero()) |
1425 | 1 | return; |
1426 | | |
1427 | 198 | Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin()); |
1428 | | |
1429 | 198 | LValue ivarLValue = |
1430 | 198 | EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, /*quals*/ 0); |
1431 | 198 | Address ivarAddr = ivarLValue.getAddress(*this); |
1432 | | |
1433 | | // Currently, all atomic accesses have to be through integer |
1434 | | // types, so there's no point in trying to pick a prettier type. |
1435 | 198 | llvm::Type *bitcastType = |
1436 | 198 | llvm::Type::getIntNTy(getLLVMContext(), |
1437 | 198 | getContext().toBits(strategy.getIvarSize())); |
1438 | | |
1439 | | // Cast both arguments to the chosen operation type. |
1440 | 198 | argAddr = Builder.CreateElementBitCast(argAddr, bitcastType); |
1441 | 198 | ivarAddr = Builder.CreateElementBitCast(ivarAddr, bitcastType); |
1442 | | |
1443 | | // This bitcast load is likely to cause some nasty IR. |
1444 | 198 | llvm::Value *load = Builder.CreateLoad(argAddr); |
1445 | | |
1446 | | // Perform an atomic store. There are no memory ordering requirements. |
1447 | 198 | llvm::StoreInst *store = Builder.CreateStore(load, ivarAddr); |
1448 | 198 | store->setAtomic(llvm::AtomicOrdering::Unordered); |
1449 | 198 | return; |
1450 | 199 | } |
1451 | | |
1452 | 100 | case PropertyImplStrategy::GetSetProperty: |
1453 | 157 | case PropertyImplStrategy::SetPropertyAndExpressionGet: { |
1454 | | |
1455 | 157 | llvm::FunctionCallee setOptimizedPropertyFn = nullptr; |
1456 | 157 | llvm::FunctionCallee setPropertyFn = nullptr; |
1457 | 157 | if (UseOptimizedSetter(CGM)) { |
1458 | | // Deployment target is at least OS X 10.8 / iOS 6.0, and GC is off.
1459 | 92 | setOptimizedPropertyFn = |
1460 | 92 | CGM.getObjCRuntime().GetOptimizedPropertySetFunction( |
1461 | 92 | strategy.isAtomic(), strategy.isCopy()); |
1462 | 92 | if (!setOptimizedPropertyFn) { |
1463 | 0 | CGM.ErrorUnsupported(propImpl, "Obj-C optimized setter - NYI"); |
1464 | 0 | return; |
1465 | 0 | } |
1466 | 92 | } |
1467 | 65 | else { |
1468 | 65 | setPropertyFn = CGM.getObjCRuntime().GetPropertySetFunction(); |
1469 | 65 | if (!setPropertyFn) { |
1470 | 0 | CGM.ErrorUnsupported(propImpl, "Obj-C setter requiring atomic copy"); |
1471 | 0 | return; |
1472 | 0 | } |
1473 | 65 | } |
1474 | | |
1475 | | // Emit objc_setProperty((id) self, _cmd, offset, arg, |
1476 | | // <is-atomic>, <is-copy>). |
1477 | 157 | llvm::Value *cmd = |
1478 | 157 | Builder.CreateLoad(GetAddrOfLocalVar(setterMethod->getCmdDecl())); |
1479 | 157 | llvm::Value *self = |
1480 | 157 | Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy); |
1481 | 157 | llvm::Value *ivarOffset = |
1482 | 157 | EmitIvarOffset(classImpl->getClassInterface(), ivar); |
1483 | 157 | Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin()); |
1484 | 157 | llvm::Value *arg = Builder.CreateLoad(argAddr, "arg"); |
1485 | 157 | arg = Builder.CreateBitCast(arg, VoidPtrTy); |
1486 | | |
1487 | 157 | CallArgList args; |
1488 | 157 | args.add(RValue::get(self), getContext().getObjCIdType()); |
1489 | 157 | args.add(RValue::get(cmd), getContext().getObjCSelType()); |
1490 | 157 | if (setOptimizedPropertyFn) { |
1491 | 92 | args.add(RValue::get(arg), getContext().getObjCIdType()); |
1492 | 92 | args.add(RValue::get(ivarOffset), getContext().getPointerDiffType()); |
1493 | 92 | CGCallee callee = CGCallee::forDirect(setOptimizedPropertyFn); |
1494 | 92 | EmitCall(getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, args), |
1495 | 92 | callee, ReturnValueSlot(), args); |
1496 | 92 | } else { |
1497 | 65 | args.add(RValue::get(ivarOffset), getContext().getPointerDiffType()); |
1498 | 65 | args.add(RValue::get(arg), getContext().getObjCIdType()); |
1499 | 65 | args.add(RValue::get(Builder.getInt1(strategy.isAtomic())), |
1500 | 65 | getContext().BoolTy); |
1501 | 65 | args.add(RValue::get(Builder.getInt1(strategy.isCopy())), |
1502 | 65 | getContext().BoolTy); |
1503 | | // FIXME: We shouldn't need to get the function info here, the runtime |
1504 | | // already should have computed it to build the function. |
1505 | 65 | CGCallee callee = CGCallee::forDirect(setPropertyFn); |
1506 | 65 | EmitCall(getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, args), |
1507 | 65 | callee, ReturnValueSlot(), args); |
1508 | 65 | } |
1509 | | |
1510 | 157 | return; |
1511 | 157 | } |
1512 | | |
1513 | 25 | case PropertyImplStrategy::CopyStruct: |
1514 | 25 | emitStructSetterCall(*this, setterMethod, ivar); |
1515 | 25 | return; |
1516 | | |
1517 | 49 | case PropertyImplStrategy::Expression: |
1518 | 49 | break; |
1519 | 430 | } |
1520 | | |
1521 | | // Otherwise, fake up some ASTs and emit a normal assignment. |
1522 | 49 | ValueDecl *selfDecl = setterMethod->getSelfDecl(); |
1523 | 49 | DeclRefExpr self(getContext(), selfDecl, false, selfDecl->getType(), |
1524 | 49 | VK_LValue, SourceLocation()); |
1525 | 49 | ImplicitCastExpr selfLoad(ImplicitCastExpr::OnStack, selfDecl->getType(), |
1526 | 49 | CK_LValueToRValue, &self, VK_PRValue, |
1527 | 49 | FPOptionsOverride()); |
1528 | 49 | ObjCIvarRefExpr ivarRef(ivar, ivar->getType().getNonReferenceType(), |
1529 | 49 | SourceLocation(), SourceLocation(), |
1530 | 49 | &selfLoad, true, true); |
1531 | | |
1532 | 49 | ParmVarDecl *argDecl = *setterMethod->param_begin(); |
1533 | 49 | QualType argType = argDecl->getType().getNonReferenceType(); |
1534 | 49 | DeclRefExpr arg(getContext(), argDecl, false, argType, VK_LValue, |
1535 | 49 | SourceLocation()); |
1536 | 49 | ImplicitCastExpr argLoad(ImplicitCastExpr::OnStack, |
1537 | 49 | argType.getUnqualifiedType(), CK_LValueToRValue, |
1538 | 49 | &arg, VK_PRValue, FPOptionsOverride()); |
1539 | | |
1540 | | // The property type can differ from the ivar type in some situations
1541 | | // with Objective-C pointer types; in those cases we can always bitcast
1542 | | // the RHS. The following absurdity is just to ensure well-formed IR.
1543 | 49 | CastKind argCK = CK_NoOp; |
1544 | 49 | if (ivarRef.getType()->isObjCObjectPointerType()) { |
1545 | 34 | if (argLoad.getType()->isObjCObjectPointerType()) |
1546 | 34 | argCK = CK_BitCast; |
1547 | 0 | else if (argLoad.getType()->isBlockPointerType()) |
1548 | 0 | argCK = CK_BlockPointerToObjCPointerCast; |
1549 | 0 | else |
1550 | 0 | argCK = CK_CPointerToObjCPointerCast; |
1551 | 34 | } else if (15 ivarRef.getType()->isBlockPointerType()15 ) { |
1552 | 0 | if (argLoad.getType()->isBlockPointerType()) |
1553 | 0 | argCK = CK_BitCast; |
1554 | 0 | else |
1555 | 0 | argCK = CK_AnyPointerToBlockPointerCast; |
1556 | 15 | } else if (ivarRef.getType()->isPointerType()) { |
1557 | 0 | argCK = CK_BitCast; |
1558 | 15 | } else if (argLoad.getType()->isAtomicType() && |
1559 | 15 | !ivarRef.getType()->isAtomicType()) {
1560 | 0 | argCK = CK_AtomicToNonAtomic; |
1561 | 15 | } else if (!argLoad.getType()->isAtomicType() && |
1562 | 15 | ivarRef.getType()->isAtomicType()) { |
1563 | 1 | argCK = CK_NonAtomicToAtomic; |
1564 | 1 | } |
1565 | 49 | ImplicitCastExpr argCast(ImplicitCastExpr::OnStack, ivarRef.getType(), argCK, |
1566 | 49 | &argLoad, VK_PRValue, FPOptionsOverride()); |
1567 | 49 | Expr *finalArg = &argLoad; |
1568 | 49 | if (!getContext().hasSameUnqualifiedType(ivarRef.getType(), |
1569 | 49 | argLoad.getType())) |
1570 | 3 | finalArg = &argCast; |
1571 | | |
1572 | 49 | BinaryOperator *assign = BinaryOperator::Create( |
1573 | 49 | getContext(), &ivarRef, finalArg, BO_Assign, ivarRef.getType(), |
1574 | 49 | VK_PRValue, OK_Ordinary, SourceLocation(), FPOptionsOverride()); |
1575 | 49 | EmitStmt(assign); |
1576 | 49 | } |
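 | |
 | | // For illustration, two sketches of how the strategies above lower
 | | // hypothetical properties (names are invented):
 | | //
 | | //   @property (copy) NSString *name;   // atomic by default
 | | //     // GetSetProperty: objc_setProperty(self, _cmd, offset, name,
 | | //     //                                  /*atomic*/ true, /*copy*/ true);
 | | //
 | | //   @property (nonatomic) int count;
 | | //     // Native: a direct store of the argument into the ivar.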
1577 | | |
1578 | | /// Generate an Objective-C property setter function. |
1579 | | /// |
1580 | | /// The given Decl must be an ObjCImplementationDecl. \@synthesize |
1581 | | /// is illegal within a category. |
1582 | | void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP, |
1583 | 442 | const ObjCPropertyImplDecl *PID) { |
1584 | 442 | llvm::Constant *AtomicHelperFn = |
1585 | 442 | CodeGenFunction(CGM).GenerateObjCAtomicSetterCopyHelperFunction(PID); |
1586 | 442 | ObjCMethodDecl *OMD = PID->getSetterMethodDecl(); |
1587 | 442 | assert(OMD && "Invalid call to generate setter (empty method)"); |
1588 | 0 | StartObjCMethod(OMD, IMP->getClassInterface()); |
1589 | | |
1590 | 442 | generateObjCSetterBody(IMP, PID, AtomicHelperFn); |
1591 | | |
1592 | 442 | FinishFunction(OMD->getEndLoc()); |
1593 | 442 | } |
1594 | | |
1595 | | namespace { |
1596 | | struct DestroyIvar final : EHScopeStack::Cleanup { |
1597 | | private: |
1598 | | llvm::Value *addr; |
1599 | | const ObjCIvarDecl *ivar; |
1600 | | CodeGenFunction::Destroyer *destroyer; |
1601 | | bool useEHCleanupForArray; |
1602 | | public: |
1603 | | DestroyIvar(llvm::Value *addr, const ObjCIvarDecl *ivar, |
1604 | | CodeGenFunction::Destroyer *destroyer, |
1605 | | bool useEHCleanupForArray) |
1606 | | : addr(addr), ivar(ivar), destroyer(destroyer), |
1607 | 111 | useEHCleanupForArray(useEHCleanupForArray) {} |
1608 | | |
1609 | 111 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
1610 | 111 | LValue lvalue |
1611 | 111 | = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), addr, ivar, /*CVR*/ 0); |
1612 | 111 | CGF.emitDestroy(lvalue.getAddress(CGF), ivar->getType(), destroyer, |
1613 | 111 | flags.isForNormalCleanup() && useEHCleanupForArray); |
1614 | 111 | } |
1615 | | }; |
1616 | | } |
1617 | | |
1618 | | /// Like CodeGenFunction::destroyARCStrong, but do it with a call. |
1619 | | static void destroyARCStrongWithStore(CodeGenFunction &CGF, |
1620 | | Address addr, |
1621 | 64 | QualType type) { |
1622 | 64 | llvm::Value *null = getNullForVariable(addr); |
1623 | 64 | CGF.EmitARCStoreStrongCall(addr, null, /*ignored*/ true); |
1624 | 64 | } |
1625 | | |
1626 | | static void emitCXXDestructMethod(CodeGenFunction &CGF, |
1627 | 75 | ObjCImplementationDecl *impl) { |
1628 | 75 | CodeGenFunction::RunCleanupsScope scope(CGF); |
1629 | | |
1630 | 75 | llvm::Value *self = CGF.LoadObjCSelf(); |
1631 | | |
1632 | 75 | const ObjCInterfaceDecl *iface = impl->getClassInterface(); |
1633 | 75 | for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin(); |
1634 | 207 | ivar; ivar = ivar->getNextIvar()) {
1635 | 132 | QualType type = ivar->getType(); |
1636 | | |
1637 | | // Check whether the ivar is a destructible type. |
1638 | 132 | QualType::DestructionKind dtorKind = type.isDestructedType(); |
1639 | 132 | if (!dtorKind) continue;
1640 | | |
1641 | 111 | CodeGenFunction::Destroyer *destroyer = nullptr; |
1642 | | |
1643 | | // Use a call to objc_storeStrong to destroy strong ivars, for the |
1644 | | // general benefit of the tools. |
1645 | 111 | if (dtorKind == QualType::DK_objc_strong_lifetime) { |
1646 | 64 | destroyer = destroyARCStrongWithStore; |
1647 | | |
1648 | | // Otherwise use the default for the destruction kind. |
1649 | 64 | } else { |
1650 | 47 | destroyer = CGF.getDestroyer(dtorKind); |
1651 | 47 | } |
1652 | | |
1653 | 111 | CleanupKind cleanupKind = CGF.getCleanupKind(dtorKind); |
1654 | | |
1655 | 111 | CGF.EHStack.pushCleanup<DestroyIvar>(cleanupKind, self, ivar, destroyer, |
1656 | 111 | cleanupKind & EHCleanup); |
1657 | 111 | } |
1658 | | |
1659 | 75 | assert(scope.requiresCleanups() && "nothing to do in .cxx_destruct?"); |
1660 | 75 | } |
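 | |
 | | // For illustration: given a hypothetical interface
 | | //   @interface I : NSObject { std::string str; id __strong obj; }
 | | // the synthesized .cxx_destruct runs ~basic_string() on 'str' and clears
 | | // 'obj' with objc_storeStrong(&self->obj, nil).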
1661 | | |
1662 | | void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP, |
1663 | | ObjCMethodDecl *MD, |
1664 | 97 | bool ctor) { |
1665 | 97 | MD->createImplicitParams(CGM.getContext(), IMP->getClassInterface()); |
1666 | 97 | StartObjCMethod(MD, IMP->getClassInterface()); |
1667 | | |
1668 | | // Emit .cxx_construct. |
1669 | 97 | if (ctor) { |
1670 | | // Suppress the final autorelease in ARC. |
1671 | 22 | AutoreleaseResult = false; |
1672 | | |
1673 | 31 | for (const auto *IvarInit : IMP->inits()) { |
1674 | 31 | FieldDecl *Field = IvarInit->getAnyMember(); |
1675 | 31 | ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(Field); |
1676 | 31 | LValue LV = EmitLValueForIvar(TypeOfSelfObject(), |
1677 | 31 | LoadObjCSelf(), Ivar, 0); |
1678 | 31 | EmitAggExpr(IvarInit->getInit(), |
1679 | 31 | AggValueSlot::forLValue(LV, *this, AggValueSlot::IsDestructed, |
1680 | 31 | AggValueSlot::DoesNotNeedGCBarriers, |
1681 | 31 | AggValueSlot::IsNotAliased, |
1682 | 31 | AggValueSlot::DoesNotOverlap)); |
1683 | 31 | } |
1684 | | // constructor returns 'self'. |
1685 | 22 | CodeGenTypes &Types = CGM.getTypes(); |
1686 | 22 | QualType IdTy(CGM.getContext().getObjCIdType()); |
1687 | 22 | llvm::Value *SelfAsId = |
1688 | 22 | Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy)); |
1689 | 22 | EmitReturnOfRValue(RValue::get(SelfAsId), IdTy); |
1690 | | |
1691 | | // Emit .cxx_destruct. |
1692 | 75 | } else { |
1693 | 75 | emitCXXDestructMethod(*this, IMP); |
1694 | 75 | } |
1695 | 97 | FinishFunction(); |
1696 | 97 | } |
1697 | | |
1698 | 1.45k | llvm::Value *CodeGenFunction::LoadObjCSelf() { |
1699 | 1.45k | VarDecl *Self = cast<ObjCMethodDecl>(CurFuncDecl)->getSelfDecl(); |
1700 | 1.45k | DeclRefExpr DRE(getContext(), Self, |
1701 | 1.45k | /*is enclosing local*/ (CurFuncDecl != CurCodeDecl), |
1702 | 1.45k | Self->getType(), VK_LValue, SourceLocation()); |
1703 | 1.45k | return EmitLoadOfScalar(EmitDeclRefLValue(&DRE), SourceLocation()); |
1704 | 1.45k | } |
1705 | | |
1706 | 719 | QualType CodeGenFunction::TypeOfSelfObject() { |
1707 | 719 | const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl); |
1708 | 719 | ImplicitParamDecl *selfDecl = OMD->getSelfDecl(); |
1709 | 719 | const ObjCObjectPointerType *PTy = cast<ObjCObjectPointerType>( |
1710 | 719 | getContext().getCanonicalType(selfDecl->getType())); |
1711 | 719 | return PTy->getPointeeType(); |
1712 | 719 | } |
1713 | | |
1714 | 68 | void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){ |
1715 | 68 | llvm::FunctionCallee EnumerationMutationFnPtr = |
1716 | 68 | CGM.getObjCRuntime().EnumerationMutationFunction(); |
1717 | 68 | if (!EnumerationMutationFnPtr) { |
1718 | 0 | CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime"); |
1719 | 0 | return; |
1720 | 0 | } |
1721 | 68 | CGCallee EnumerationMutationFn = |
1722 | 68 | CGCallee::forDirect(EnumerationMutationFnPtr); |
1723 | | |
1724 | 68 | CGDebugInfo *DI = getDebugInfo(); |
1725 | 68 | if (DI) |
1726 | 31 | DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin()); |
1727 | | |
1728 | 68 | RunCleanupsScope ForScope(*this); |
1729 | | |
1730 | | // The local variable comes into scope immediately. |
1731 | 68 | AutoVarEmission variable = AutoVarEmission::invalid(); |
1732 | 68 | if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) |
1733 | 68 | variable = EmitAutoVarAlloca(*cast<VarDecl>(SD->getSingleDecl())); |
1734 | | |
1735 | 68 | JumpDest LoopEnd = getJumpDestInCurrentScope("forcoll.end"); |
1736 | | |
1737 | | // Fast enumeration state. |
1738 | 68 | QualType StateTy = CGM.getObjCFastEnumerationStateType(); |
1739 | 68 | Address StatePtr = CreateMemTemp(StateTy, "state.ptr"); |
1740 | 68 | EmitNullInitialization(StatePtr, StateTy); |
1741 | | |
1742 | | // Number of elements in the items array. |
1743 | 68 | static const unsigned NumItems = 16; |
1744 | | |
1745 | | // Fetch the countByEnumeratingWithState:objects:count: selector. |
1746 | 68 | IdentifierInfo *II[] = { |
1747 | 68 | &CGM.getContext().Idents.get("countByEnumeratingWithState"), |
1748 | 68 | &CGM.getContext().Idents.get("objects"), |
1749 | 68 | &CGM.getContext().Idents.get("count") |
1750 | 68 | }; |
1751 | 68 | Selector FastEnumSel = |
1752 | 68 | CGM.getContext().Selectors.getSelector(llvm::array_lengthof(II), &II[0]); |
1753 | | |
1754 | 68 | QualType ItemsTy = |
1755 | 68 | getContext().getConstantArrayType(getContext().getObjCIdType(), |
1756 | 68 | llvm::APInt(32, NumItems), nullptr, |
1757 | 68 | ArrayType::Normal, 0); |
1758 | 68 | Address ItemsPtr = CreateMemTemp(ItemsTy, "items.ptr"); |
1759 | | |
1760 | | // Emit the collection pointer. In ARC, we do a retain. |
1761 | 68 | llvm::Value *Collection; |
1762 | 68 | if (getLangOpts().ObjCAutoRefCount) { |
1763 | 12 | Collection = EmitARCRetainScalarExpr(S.getCollection()); |
1764 | | |
1765 | | // Enter a cleanup to do the release. |
1766 | 12 | EmitObjCConsumeObject(S.getCollection()->getType(), Collection); |
1767 | 56 | } else { |
1768 | 56 | Collection = EmitScalarExpr(S.getCollection()); |
1769 | 56 | } |
1770 | | |
1771 | | // The 'continue' label needs to appear within the cleanup for the |
1772 | | // collection object. |
1773 | 68 | JumpDest AfterBody = getJumpDestInCurrentScope("forcoll.next"); |
1774 | | |
1775 | | // Send it our message: |
1776 | 68 | CallArgList Args; |
1777 | | |
1778 | | // The first argument is a temporary of the enumeration-state type. |
1779 | 68 | Args.add(RValue::get(StatePtr.getPointer()), |
1780 | 68 | getContext().getPointerType(StateTy)); |
1781 | | |
1782 | | // The second argument is a temporary array with space for NumItems |
1783 | | // pointers. We'll actually be loading elements from the array |
1784 | | // pointer written into the control state; this buffer is so that |
1785 | | // collections that *aren't* backed by arrays can still queue up |
1786 | | // batches of elements. |
1787 | 68 | Args.add(RValue::get(ItemsPtr.getPointer()), |
1788 | 68 | getContext().getPointerType(ItemsTy)); |
1789 | | |
1790 | | // The third argument is the capacity of that temporary array. |
1791 | 68 | llvm::Type *NSUIntegerTy = ConvertType(getContext().getNSUIntegerType()); |
1792 | 68 | llvm::Constant *Count = llvm::ConstantInt::get(NSUIntegerTy, NumItems); |
1793 | 68 | Args.add(RValue::get(Count), getContext().getNSUIntegerType()); |
1794 | | |
1795 | | // Start the enumeration. |
1796 | 68 | RValue CountRV = |
1797 | 68 | CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(), |
1798 | 68 | getContext().getNSUIntegerType(), |
1799 | 68 | FastEnumSel, Collection, Args); |
1800 | | |
1801 | | // The initial number of objects that were returned in the buffer. |
1802 | 68 | llvm::Value *initialBufferLimit = CountRV.getScalarVal(); |
1803 | | |
1804 | 68 | llvm::BasicBlock *EmptyBB = createBasicBlock("forcoll.empty"); |
1805 | 68 | llvm::BasicBlock *LoopInitBB = createBasicBlock("forcoll.loopinit"); |
1806 | | |
1807 | 68 | llvm::Value *zero = llvm::Constant::getNullValue(NSUIntegerTy); |
1808 | | |
1809 | | // If the limit pointer was zero to begin with, the collection is |
1810 | | // empty; skip all this. Set the branch weight assuming this has the same |
1811 | | // probability of exiting the loop as any other loop exit. |
1812 | 68 | uint64_t EntryCount = getCurrentProfileCount(); |
1813 | 68 | Builder.CreateCondBr( |
1814 | 68 | Builder.CreateICmpEQ(initialBufferLimit, zero, "iszero"), EmptyBB, |
1815 | 68 | LoopInitBB, |
1816 | 68 | createProfileWeights(EntryCount, getProfileCount(S.getBody()))); |
1817 | | |
1818 | | // Otherwise, initialize the loop. |
1819 | 68 | EmitBlock(LoopInitBB); |
1820 | | |
1821 | | // Save the initial mutations value. This is the value at an |
1822 | | // address that was written into the state object by |
1823 | | // countByEnumeratingWithState:objects:count:. |
1824 | 68 | Address StateMutationsPtrPtr = |
1825 | 68 | Builder.CreateStructGEP(StatePtr, 2, "mutationsptr.ptr"); |
1826 | 68 | llvm::Value *StateMutationsPtr |
1827 | 68 | = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr"); |
1828 | | |
1829 | 68 | llvm::Type *UnsignedLongTy = ConvertType(getContext().UnsignedLongTy); |
1830 | 68 | llvm::Value *initialMutations = |
1831 | 68 | Builder.CreateAlignedLoad(UnsignedLongTy, StateMutationsPtr, |
1832 | 68 | getPointerAlign(), "forcoll.initial-mutations"); |
1833 | | |
1834 | | // Start looping. This is the point we return to whenever we have a |
1835 | | // fresh, non-empty batch of objects. |
1836 | 68 | llvm::BasicBlock *LoopBodyBB = createBasicBlock("forcoll.loopbody"); |
1837 | 68 | EmitBlock(LoopBodyBB); |
1838 | | |
1839 | | // The current index into the buffer. |
1840 | 68 | llvm::PHINode *index = Builder.CreatePHI(NSUIntegerTy, 3, "forcoll.index"); |
1841 | 68 | index->addIncoming(zero, LoopInitBB); |
1842 | | |
1843 | | // The current buffer size. |
1844 | 68 | llvm::PHINode *count = Builder.CreatePHI(NSUIntegerTy, 3, "forcoll.count"); |
1845 | 68 | count->addIncoming(initialBufferLimit, LoopInitBB); |
1846 | | |
1847 | 68 | incrementProfileCounter(&S); |
1848 | | |
1849 | | // Check whether the mutations value has changed from where it was |
1850 | | // at start. StateMutationsPtr should actually be invariant between |
1851 | | // refreshes. |
1852 | 68 | StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr"); |
1853 | 68 | llvm::Value *currentMutations |
1854 | 68 | = Builder.CreateAlignedLoad(UnsignedLongTy, StateMutationsPtr, |
1855 | 68 | getPointerAlign(), "statemutations"); |
1856 | | |
1857 | 68 | llvm::BasicBlock *WasMutatedBB = createBasicBlock("forcoll.mutated"); |
1858 | 68 | llvm::BasicBlock *WasNotMutatedBB = createBasicBlock("forcoll.notmutated"); |
1859 | | |
1860 | 68 | Builder.CreateCondBr(Builder.CreateICmpEQ(currentMutations, initialMutations), |
1861 | 68 | WasNotMutatedBB, WasMutatedBB); |
1862 | | |
1863 | | // If so, call the enumeration-mutation function. |
1864 | 68 | EmitBlock(WasMutatedBB); |
1865 | 68 | llvm::Type *ObjCIdType = ConvertType(getContext().getObjCIdType()); |
1866 | 68 | llvm::Value *V = |
1867 | 68 | Builder.CreateBitCast(Collection, ObjCIdType); |
1868 | 68 | CallArgList Args2; |
1869 | 68 | Args2.add(RValue::get(V), getContext().getObjCIdType()); |
1870 | | // FIXME: We shouldn't need to get the function info here, the runtime already |
1871 | | // should have computed it to build the function. |
1872 | 68 | EmitCall( |
1873 | 68 | CGM.getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, Args2), |
1874 | 68 | EnumerationMutationFn, ReturnValueSlot(), Args2); |
1875 | | |
1876 | | // Otherwise, or if the mutation function returns, just continue. |
1877 | 68 | EmitBlock(WasNotMutatedBB); |
1878 | | |
1879 | | // Initialize the element variable. |
1880 | 68 | RunCleanupsScope elementVariableScope(*this); |
1881 | 68 | bool elementIsVariable; |
1882 | 68 | LValue elementLValue; |
1883 | 68 | QualType elementType; |
1884 | 68 | if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) { |
1885 | | // Initialize the variable, in case it's a __block variable or something. |
1886 | 68 | EmitAutoVarInit(variable); |
1887 | | |
1888 | 68 | const VarDecl *D = cast<VarDecl>(SD->getSingleDecl()); |
1889 | 68 | DeclRefExpr tempDRE(getContext(), const_cast<VarDecl *>(D), false, |
1890 | 68 | D->getType(), VK_LValue, SourceLocation()); |
1891 | 68 | elementLValue = EmitLValue(&tempDRE); |
1892 | 68 | elementType = D->getType(); |
1893 | 68 | elementIsVariable = true; |
1894 | | |
1895 | 68 | if (D->isARCPseudoStrong()) |
1896 | 9 | elementLValue.getQuals().setObjCLifetime(Qualifiers::OCL_ExplicitNone); |
1897 | 68 | } else { |
1898 | 0 | elementLValue = LValue(); // suppress warning |
1899 | 0 | elementType = cast<Expr>(S.getElement())->getType(); |
1900 | 0 | elementIsVariable = false; |
1901 | 0 | } |
1902 | 68 | llvm::Type *convertedElementType = ConvertType(elementType); |
1903 | | |
1904 | | // Fetch the buffer out of the enumeration state. |
1905 | | // TODO: this pointer should actually be invariant between |
1906 | | // refreshes, which would help us do certain loop optimizations. |
1907 | 68 | Address StateItemsPtr = |
1908 | 68 | Builder.CreateStructGEP(StatePtr, 1, "stateitems.ptr"); |
1909 | 68 | llvm::Value *EnumStateItems = |
1910 | 68 | Builder.CreateLoad(StateItemsPtr, "stateitems"); |
1911 | | |
1912 | | // Fetch the value at the current index from the buffer. |
1913 | 68 | llvm::Value *CurrentItemPtr = Builder.CreateGEP( |
1914 | 68 | ObjCIdType, EnumStateItems, index, "currentitem.ptr"); |
1915 | 68 | llvm::Value *CurrentItem = |
1916 | 68 | Builder.CreateAlignedLoad(ObjCIdType, CurrentItemPtr, getPointerAlign()); |
1917 | | |
1918 | 68 | if (SanOpts.has(SanitizerKind::ObjCCast)) { |
1919 | | // Before using an item from the collection, check that the implicit cast |
1920 | | // from id to the element type is valid. This is done with instrumentation |
1921 | | // roughly corresponding to: |
1922 | | // |
1923 | | // if (![item isKindOfClass:expectedCls]) { /* emit diagnostic */ } |
1924 | 3 | const ObjCObjectPointerType *ObjPtrTy = |
1925 | 3 | elementType->getAsObjCInterfacePointerType(); |
1926 | 3 | const ObjCInterfaceType *InterfaceTy = |
1927 | 3 | ObjPtrTy ? ObjPtrTy->getInterfaceType() : nullptr;
1928 | 3 | if (InterfaceTy) { |
1929 | 3 | SanitizerScope SanScope(this); |
1930 | 3 | auto &C = CGM.getContext(); |
1931 | 3 | assert(InterfaceTy->getDecl() && "No decl for ObjC interface type"); |
1932 | 0 | Selector IsKindOfClassSel = GetUnarySelector("isKindOfClass", C); |
1933 | 3 | CallArgList IsKindOfClassArgs; |
1934 | 3 | llvm::Value *Cls = |
1935 | 3 | CGM.getObjCRuntime().GetClass(*this, InterfaceTy->getDecl()); |
1936 | 3 | IsKindOfClassArgs.add(RValue::get(Cls), C.getObjCClassType()); |
1937 | 3 | llvm::Value *IsClass = |
1938 | 3 | CGM.getObjCRuntime() |
1939 | 3 | .GenerateMessageSend(*this, ReturnValueSlot(), C.BoolTy, |
1940 | 3 | IsKindOfClassSel, CurrentItem, |
1941 | 3 | IsKindOfClassArgs) |
1942 | 3 | .getScalarVal(); |
1943 | 3 | llvm::Constant *StaticData[] = { |
1944 | 3 | EmitCheckSourceLocation(S.getBeginLoc()), |
1945 | 3 | EmitCheckTypeDescriptor(QualType(InterfaceTy, 0))}; |
1946 | 3 | EmitCheck({{IsClass, SanitizerKind::ObjCCast}}, |
1947 | 3 | SanitizerHandler::InvalidObjCCast, |
1948 | 3 | ArrayRef<llvm::Constant *>(StaticData), CurrentItem); |
1949 | 3 | } |
1950 | 3 | } |
1951 | | |
1952 | | // Cast that value to the right type. |
1953 | 0 | CurrentItem = Builder.CreateBitCast(CurrentItem, convertedElementType, |
1954 | 68 | "currentitem"); |
1955 | | |
1956 | | // Make sure we have an l-value. Yes, this gets evaluated every |
1957 | | // time through the loop. |
1958 | 68 | if (!elementIsVariable) { |
1959 | 0 | elementLValue = EmitLValue(cast<Expr>(S.getElement())); |
1960 | 0 | EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue); |
1961 | 68 | } else { |
1962 | 68 | EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue, |
1963 | 68 | /*isInit*/ true); |
1964 | 68 | } |
1965 | | |
1966 | | // If we do have an element variable, this assignment is the end of |
1967 | | // its initialization. |
1968 | 68 | if (elementIsVariable) |
1969 | 68 | EmitAutoVarCleanups(variable); |
1970 | | |
1971 | | // Perform the loop body, setting up break and continue labels. |
1972 | 68 | BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody)); |
1973 | 68 | { |
1974 | 68 | RunCleanupsScope Scope(*this); |
1975 | 68 | EmitStmt(S.getBody()); |
1976 | 68 | } |
1977 | 68 | BreakContinueStack.pop_back(); |
1978 | | |
1979 | | // Destroy the element variable now. |
1980 | 68 | elementVariableScope.ForceCleanup(); |
1981 | | |
1982 | | // Check whether there are more elements. |
1983 | 68 | EmitBlock(AfterBody.getBlock()); |
1984 | | |
1985 | 68 | llvm::BasicBlock *FetchMoreBB = createBasicBlock("forcoll.refetch"); |
1986 | | |
1987 | | // First we check in the local buffer. |
1988 | 68 | llvm::Value *indexPlusOne = |
1989 | 68 | Builder.CreateAdd(index, llvm::ConstantInt::get(NSUIntegerTy, 1)); |
1990 | | |
1991 | | // If we haven't overrun the buffer yet, we can continue. |
1992 | | // Set the branch weights based on the simplifying assumption that this is |
1993 | | // like a while-loop, i.e., ignoring that the false branch fetches more |
1994 | | // elements and then returns to the loop. |
1995 | 68 | Builder.CreateCondBr( |
1996 | 68 | Builder.CreateICmpULT(indexPlusOne, count), LoopBodyBB, FetchMoreBB, |
1997 | 68 | createProfileWeights(getProfileCount(S.getBody()), EntryCount)); |
1998 | | |
1999 | 68 | index->addIncoming(indexPlusOne, AfterBody.getBlock()); |
2000 | 68 | count->addIncoming(count, AfterBody.getBlock()); |
2001 | | |
2002 | | // Otherwise, we have to fetch more elements. |
2003 | 68 | EmitBlock(FetchMoreBB); |
2004 | | |
2005 | 68 | CountRV = |
2006 | 68 | CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(), |
2007 | 68 | getContext().getNSUIntegerType(), |
2008 | 68 | FastEnumSel, Collection, Args); |
2009 | | |
2010 | | // If we got a zero count, we're done. |
2011 | 68 | llvm::Value *refetchCount = CountRV.getScalarVal(); |
2012 | | |
2013 | | // (note that the message send might split FetchMoreBB) |
2014 | 68 | index->addIncoming(zero, Builder.GetInsertBlock()); |
2015 | 68 | count->addIncoming(refetchCount, Builder.GetInsertBlock()); |
2016 | | |
2017 | 68 | Builder.CreateCondBr(Builder.CreateICmpEQ(refetchCount, zero), |
2018 | 68 | EmptyBB, LoopBodyBB); |
2019 | | |
2020 | | // No more elements. |
2021 | 68 | EmitBlock(EmptyBB); |
2022 | | |
2023 | 68 | if (!elementIsVariable) { |
2024 | | // If the element was not a declaration, set it to be null. |
2024 | | // If the element was not a declaration, set it to be null.
2025 | |
2026 | 0 | llvm::Value *null = llvm::Constant::getNullValue(convertedElementType);
2027 | 0 | elementLValue = EmitLValue(cast<Expr>(S.getElement())); |
2028 | 0 | EmitStoreThroughLValue(RValue::get(null), elementLValue); |
2029 | 0 | } |
2030 | | |
2031 | 68 | if (DI) |
2032 | 31 | DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd()); |
2033 | | |
2034 | 68 | ForScope.ForceCleanup(); |
2035 | 68 | EmitBlock(LoopEnd.getBlock()); |
2036 | 68 | } |
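 | |
 | | // For illustration, the control flow emitted above corresponds roughly to
 | | // this source-level expansion of 'for (id obj in collection) body;'
 | | // (buffer size 16 matches NumItems; the mutation function is
 | | // objc_enumerationMutation on the Mac runtime):
 | |
 | | //   NSFastEnumerationState state = {};
 | | //   id buffer[16];
 | | //   NSUInteger count = [collection countByEnumeratingWithState:&state
 | | //                                                      objects:buffer
 | | //                                                        count:16];
 | | //   if (count) {
 | | //     unsigned long mutations = *state.mutationsPtr;
 | | //     do {
 | | //       for (NSUInteger i = 0; i != count; ++i) {
 | | //         if (*state.mutationsPtr != mutations)
 | | //           objc_enumerationMutation(collection);
 | | //         id obj = state.itemsPtr[i];
 | | //         body;
 | | //       }
 | | //       count = [collection countByEnumeratingWithState:&state
 | | //                                                objects:buffer
 | | //                                                  count:16];
 | | //     } while (count);
 | | //   }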
2037 | | |
2038 | 220 | void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) { |
2039 | 220 | CGM.getObjCRuntime().EmitTryStmt(*this, S); |
2040 | 220 | } |
2041 | | |
2042 | 50 | void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) { |
2043 | 50 | CGM.getObjCRuntime().EmitThrowStmt(*this, S); |
2044 | 50 | } |
2045 | | |
2046 | | void CodeGenFunction::EmitObjCAtSynchronizedStmt( |
2047 | 13 | const ObjCAtSynchronizedStmt &S) { |
2048 | 13 | CGM.getObjCRuntime().EmitSynchronizedStmt(*this, S); |
2049 | 13 | } |
2050 | | |
2051 | | namespace { |
2052 | | struct CallObjCRelease final : EHScopeStack::Cleanup { |
2053 | 297 | CallObjCRelease(llvm::Value *object) : object(object) {} |
2054 | | llvm::Value *object; |
2055 | | |
2056 | 301 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
2057 | | // Releases at the end of the full-expression are imprecise. |
2058 | 301 | CGF.EmitARCRelease(object, ARCImpreciseLifetime); |
2059 | 301 | } |
2060 | | }; |
2061 | | } |
2062 | | |
2063 | | /// Produce the code for a CK_ARCConsumeObject. Does a primitive |
2064 | | /// release at the end of the full-expression. |
2065 | | llvm::Value *CodeGenFunction::EmitObjCConsumeObject(QualType type, |
2066 | 297 | llvm::Value *object) { |
2067 | | // If we're in a conditional branch, we need to make the cleanup |
2068 | | // conditional. |
2069 | 297 | pushFullExprCleanup<CallObjCRelease>(getARCCleanupKind(), object); |
2070 | 297 | return object; |
2071 | 297 | } |
2072 | | |
2073 | | llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type, |
2074 | 0 | llvm::Value *value) { |
2075 | 0 | return EmitARCRetainAutorelease(type, value); |
2076 | 0 | } |
2077 | | |
2078 | | /// Given a number of pointers, inform the optimizer that they're |
2079 | | /// being intrinsically used up until this point in the program. |
2080 | 23 | void CodeGenFunction::EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values) { |
2081 | 23 | llvm::Function *&fn = CGM.getObjCEntrypoints().clang_arc_use; |
2082 | 23 | if (!fn) |
2083 | 9 | fn = CGM.getIntrinsic(llvm::Intrinsic::objc_clang_arc_use); |
2084 | | |
2085 | | // This isn't really a "runtime" function, but as an intrinsic it |
2086 | | // doesn't really matter as long as we align things up. |
2087 | 23 | EmitNounwindRuntimeCall(fn, values); |
2088 | 23 | } |
2089 | | |
2090 | | /// Emit a call to "clang.arc.noop.use", which consumes the result of a call |
2091 | | /// that has operand bundle "clang.arc.attachedcall". |
2092 | 129 | void CodeGenFunction::EmitARCNoopIntrinsicUse(ArrayRef<llvm::Value *> values) { |
2093 | 129 | llvm::Function *&fn = CGM.getObjCEntrypoints().clang_arc_noop_use; |
2094 | 129 | if (!fn) |
2095 | 20 | fn = CGM.getIntrinsic(llvm::Intrinsic::objc_clang_arc_noop_use); |
2096 | 129 | EmitNounwindRuntimeCall(fn, values); |
2097 | 129 | } |
2098 | | |
2099 | 1.23k | static void setARCRuntimeFunctionLinkage(CodeGenModule &CGM, llvm::Value *RTF) { |
2100 | 1.23k | if (auto *F = dyn_cast<llvm::Function>(RTF)) { |
2101 | | // If the target runtime doesn't naturally support ARC, emit weak |
2102 | | // references to the runtime support library. We don't really |
2103 | | // permit this to fail, but we need a particular relocation style. |
2104 | 1.23k | if (!CGM.getLangOpts().ObjCRuntime.hasNativeARC() && |
2105 | 1.23k | !CGM.getTriple().isOSBinFormatCOFF()) {
2106 | 662 | F->setLinkage(llvm::Function::ExternalWeakLinkage); |
2107 | 662 | } |
2108 | 1.23k | } |
2109 | 1.23k | } |
2110 | | |
2111 | | static void setARCRuntimeFunctionLinkage(CodeGenModule &CGM, |
2112 | 97 | llvm::FunctionCallee RTF) { |
2113 | 97 | setARCRuntimeFunctionLinkage(CGM, RTF.getCallee()); |
2114 | 97 | } |
2115 | | |
2116 | | static llvm::Function *getARCIntrinsic(llvm::Intrinsic::ID IntID, |
2117 | 1.13k | CodeGenModule &CGM) { |
2118 | 1.13k | llvm::Function *fn = CGM.getIntrinsic(IntID); |
2119 | 1.13k | setARCRuntimeFunctionLinkage(CGM, fn); |
2120 | 1.13k | return fn; |
2121 | 1.13k | } |
2122 | | |
2123 | | /// Perform an operation having the signature |
2124 | | /// i8* (i8*) |
2125 | | /// where a null input causes a no-op and returns null. |
2126 | | static llvm::Value *emitARCValueOperation( |
2127 | | CodeGenFunction &CGF, llvm::Value *value, llvm::Type *returnType, |
2128 | | llvm::Function *&fn, llvm::Intrinsic::ID IntID, |
2129 | 1.26k | llvm::CallInst::TailCallKind tailKind = llvm::CallInst::TCK_None) { |
2130 | 1.26k | if (isa<llvm::ConstantPointerNull>(value)) |
2131 | 203 | return value; |
2132 | | |
2133 | 1.06k | if (!fn) |
2134 | 167 | fn = getARCIntrinsic(IntID, CGF.CGM); |
2135 | | |
2136 | | // Cast the argument to 'id'. |
2137 | 1.06k | llvm::Type *origType = returnType ? returnType : value->getType();
2138 | 1.06k | value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy); |
2139 | | |
2140 | | // Call the function. |
2141 | 1.06k | llvm::CallInst *call = CGF.EmitNounwindRuntimeCall(fn, value); |
2142 | 1.06k | call->setTailCallKind(tailKind); |
2143 | | |
2144 | | // Cast the result back to the original type. |
2145 | 1.06k | return CGF.Builder.CreateBitCast(call, origType); |
2146 | 1.26k | } |
2147 | | |
2148 | | /// Perform an operation having the following signature: |
2149 | | /// i8* (i8**) |
2150 | | static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF, Address addr, |
2151 | | llvm::Function *&fn, |
2152 | 192 | llvm::Intrinsic::ID IntID) { |
2153 | 192 | if (!fn) |
2154 | 38 | fn = getARCIntrinsic(IntID, CGF.CGM); |
2155 | | |
2156 | | // Cast the argument to 'id*'. |
2157 | 192 | llvm::Type *origType = addr.getElementType(); |
2158 | 192 | addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8PtrTy); |
2159 | | |
2160 | | // Call the function. |
2161 | 192 | llvm::Value *result = CGF.EmitNounwindRuntimeCall(fn, addr.getPointer()); |
2162 | | |
2163 | | // Cast the result back to a dereference of the original type. |
2164 | 192 | if (origType != CGF.Int8PtrTy) |
2165 | 15 | result = CGF.Builder.CreateBitCast(result, origType); |
2166 | | |
2167 | 192 | return result; |
2168 | 192 | } |
2169 | | |
2170 | | /// Perform an operation having the following signature: |
2171 | | /// i8* (i8**, i8*) |
2172 | | static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF, Address addr, |
2173 | | llvm::Value *value, |
2174 | | llvm::Function *&fn, |
2175 | | llvm::Intrinsic::ID IntID, |
2176 | 101 | bool ignored) { |
2177 | 101 | assert(addr.getElementType() == value->getType()); |
2178 | | |
2179 | 101 | if (!fn) |
2180 | 55 | fn = getARCIntrinsic(IntID, CGF.CGM); |
2181 | | |
2182 | 101 | llvm::Type *origType = value->getType(); |
2183 | | |
2184 | 101 | llvm::Value *args[] = { |
2185 | 101 | CGF.Builder.CreateBitCast(addr.getPointer(), CGF.Int8PtrPtrTy), |
2186 | 101 | CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy) |
2187 | 101 | }; |
2188 | 101 | llvm::CallInst *result = CGF.EmitNounwindRuntimeCall(fn, args); |
2189 | | |
2190 | 101 | if (ignored) return nullptr;
2191 | | |
2192 | 21 | return CGF.Builder.CreateBitCast(result, origType); |
2193 | 101 | } |
2194 | | |
2195 | | /// Perform an operation having the following signature: |
2196 | | /// void (i8**, i8**) |
2197 | | static void emitARCCopyOperation(CodeGenFunction &CGF, Address dst, Address src, |
2198 | | llvm::Function *&fn, |
2199 | 326 | llvm::Intrinsic::ID IntID) { |
2200 | 326 | assert(dst.getType() == src.getType()); |
2201 | | |
2202 | 326 | if (!fn) |
2203 | 49 | fn = getARCIntrinsic(IntID, CGF.CGM); |
2204 | | |
2205 | 326 | llvm::Value *args[] = { |
2206 | 326 | CGF.Builder.CreateBitCast(dst.getPointer(), CGF.Int8PtrPtrTy), |
2207 | 326 | CGF.Builder.CreateBitCast(src.getPointer(), CGF.Int8PtrPtrTy) |
2208 | 326 | }; |
2209 | 326 | CGF.EmitNounwindRuntimeCall(fn, args); |
2210 | 326 | } |
2211 | | |
2212 | | /// Perform an operation having the signature |
2213 | | /// i8* (i8*) |
2214 | | /// where a null input causes a no-op and returns null. |
2215 | | static llvm::Value *emitObjCValueOperation(CodeGenFunction &CGF, |
2216 | | llvm::Value *value, |
2217 | | llvm::Type *returnType, |
2218 | | llvm::FunctionCallee &fn, |
2219 | 1.34k | StringRef fnName) { |
2220 | 1.34k | if (isa<llvm::ConstantPointerNull>(value)) |
2221 | 0 | return value; |
2222 | | |
2223 | 1.34k | if (!fn) { |
2224 | 412 | llvm::FunctionType *fnType = |
2225 | 412 | llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int8PtrTy, false); |
2226 | 412 | fn = CGF.CGM.CreateRuntimeFunction(fnType, fnName); |
2227 | | |
2228 | | // We have Native ARC, so set nonlazybind attribute for performance |
2229 | 412 | if (llvm::Function *f = dyn_cast<llvm::Function>(fn.getCallee())) |
2230 | 412 | if (fnName == "objc_retain") |
2231 | 5 | f->addFnAttr(llvm::Attribute::NonLazyBind); |
2232 | 412 | } |
2233 | | |
2234 | | // Cast the argument to 'id'. |
2235 | 1.34k | llvm::Type *origType = returnType ? returnType : value->getType();
2236 | 1.34k | value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy); |
2237 | | |
2238 | | // Call the function. |
2239 | 1.34k | llvm::CallBase *Inst = CGF.EmitCallOrInvoke(fn, value); |
2240 | | |
2241 | | // Mark calls to objc_autorelease as tail on the assumption that methods |
2242 | | // overriding autorelease do not touch anything on the stack. |
2243 | 1.34k | if (fnName == "objc_autorelease") |
2244 | 43 | if (auto *Call = dyn_cast<llvm::CallInst>(Inst)) |
2245 | 39 | Call->setTailCall(); |
2246 | | |
2247 | | // Cast the result back to the original type. |
2248 | 1.34k | return CGF.Builder.CreateBitCast(Inst, origType); |
2249 | 1.34k | } |
2250 | | |
2251 | | /// Produce the code to do a retain. Based on the type, calls one of: |
2252 | | /// call i8* \@objc_retain(i8* %value) |
2253 | | /// call i8* \@objc_retainBlock(i8* %value) |
2254 | 738 | llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) { |
2255 | 738 | if (type->isBlockPointerType()) |
2256 | 162 | return EmitARCRetainBlock(value, /*mandatory*/ false); |
2257 | 576 | else |
2258 | 576 | return EmitARCRetainNonBlock(value); |
2259 | 738 | } |
2260 | | |
2261 | | /// Retain the given object, with normal retain semantics. |
2262 | | /// call i8* \@objc_retain(i8* %value) |
2263 | 679 | llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) { |
2264 | 679 | return emitARCValueOperation(*this, value, nullptr, |
2265 | 679 | CGM.getObjCEntrypoints().objc_retain, |
2266 | 679 | llvm::Intrinsic::objc_retain); |
2267 | 679 | } |
2268 | | |
2269 | | /// Retain the given block, with _Block_copy semantics. |
2270 | | /// call i8* \@objc_retainBlock(i8* %value) |
2271 | | /// |
2272 | | /// \param mandatory - If false, emit the call with metadata |
2273 | | /// indicating that it's okay for the optimizer to eliminate this call |
2274 | | /// if it can prove that the block never escapes except down the stack. |
2275 | | llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value, |
2276 | 217 | bool mandatory) { |
2277 | 217 | llvm::Value *result |
2278 | 217 | = emitARCValueOperation(*this, value, nullptr, |
2279 | 217 | CGM.getObjCEntrypoints().objc_retainBlock, |
2280 | 217 | llvm::Intrinsic::objc_retainBlock); |
2281 | | |
2282 | | // If the copy isn't mandatory, add !clang.arc.copy_on_escape to |
2283 | | // tell the optimizer that it doesn't need to do this copy if the |
2284 | | // block doesn't escape, where being passed as an argument doesn't |
2285 | | // count as escaping. |
2286 | 217 | if (!mandatory && isa<llvm::Instruction>(result)) {
2287 | 166 | llvm::CallInst *call |
2288 | 166 | = cast<llvm::CallInst>(result->stripPointerCasts()); |
2289 | 166 | assert(call->getCalledOperand() == |
2290 | 166 | CGM.getObjCEntrypoints().objc_retainBlock); |
2291 | | |
2292 | 0 | call->setMetadata("clang.arc.copy_on_escape", |
2293 | 166 | llvm::MDNode::get(Builder.getContext(), None)); |
2294 | 166 | } |
2295 | | |
2296 | 0 | return result; |
2297 | 217 | } |
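 | |
 | | // For illustration, the non-mandatory case produces IR along the lines of
 | | //   %copy = call i8* @llvm.objc.retainBlock(i8* %block),
 | | //           !clang.arc.copy_on_escape !0
 | | // which the ARC optimizer may drop if the block provably never escapes.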
2298 | | |
2299 | 376 | static void emitAutoreleasedReturnValueMarker(CodeGenFunction &CGF) { |
2300 | | // Fetch the void(void) inline asm which marks that we're going to |
2301 | | // do something with the autoreleased return value. |
2302 | 376 | llvm::InlineAsm *&marker |
2303 | 376 | = CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker; |
2304 | 376 | if (!marker) { |
2305 | 322 | StringRef assembly |
2306 | 322 | = CGF.CGM.getTargetCodeGenInfo() |
2307 | 322 | .getARCRetainAutoreleasedReturnValueMarker(); |
2308 | | |
2309 | | // If we have an empty assembly string, there's nothing to do. |
2310 | 322 | if (assembly.empty()) { |
2311 | | |
2312 | | // Otherwise, at -O0, build an inline asm that we're going to call |
2313 | | // in a moment. |
2314 | 279 | } else if (CGF.CGM.getCodeGenOpts().OptimizationLevel == 0) {
2315 | 16 | llvm::FunctionType *type = |
2316 | 16 | llvm::FunctionType::get(CGF.VoidTy, /*variadic*/false); |
2317 | | |
2318 | 16 | marker = llvm::InlineAsm::get(type, assembly, "", /*sideeffects*/ true); |
2319 | | |
2320 | | // If we're at -O1 and above, we don't want to litter the code |
2321 | | // with this marker yet, so leave a breadcrumb for the ARC |
2322 | | // optimizer to pick up. |
2323 | 27 | } else { |
2324 | 27 | const char *retainRVMarkerKey = llvm::objcarc::getRVMarkerModuleFlagStr(); |
2325 | 27 | if (!CGF.CGM.getModule().getModuleFlag(retainRVMarkerKey)) { |
2326 | 4 | auto *str = llvm::MDString::get(CGF.getLLVMContext(), assembly); |
2327 | 4 | CGF.CGM.getModule().addModuleFlag(llvm::Module::Error, |
2328 | 4 | retainRVMarkerKey, str); |
2329 | 4 | } |
2330 | 27 | } |
2331 | 322 | } |
2332 | | |
2333 | | // Call the marker asm if we made one, which we do only at -O0. |
2334 | 376 | if (marker) |
2335 | 70 | CGF.Builder.CreateCall(marker, None, CGF.getBundlesForFunclet(marker)); |
2336 | 376 | } |
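 | |
 | | // For illustration: on arm64, where the marker string is the no-op
 | | // "mov fp, fp", the -O0 sequence looks roughly like
 | | //   %call = call i8* @foo()
 | | //   call void asm sideeffect "mov\09fp, fp", ""()
 | | //   %ret  = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %call)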
2337 | | |
2338 | | static llvm::Value *emitOptimizedARCReturnCall(llvm::Value *value, |
2339 | | bool IsRetainRV, |
2340 | 376 | CodeGenFunction &CGF) { |
2341 | 376 | emitAutoreleasedReturnValueMarker(CGF); |
2342 | | |
2343 | | // Add operand bundle "clang.arc.attachedcall" to the call instead of emitting |
2344 | | // retainRV or claimRV calls in the IR. We currently do this only when the |
2345 | | // optimization level isn't -O0 since global-isel, which is currently run at |
2346 | | // -O0, doesn't know about the operand bundle. |
2347 | 376 | ObjCEntrypoints &EPs = CGF.CGM.getObjCEntrypoints(); |
2348 | 376 | llvm::Function *&EP = IsRetainRV |
2349 | 376 | ? EPs.objc_retainAutoreleasedReturnValue
2350 | 376 | : EPs.objc_unsafeClaimAutoreleasedReturnValue;
2351 | 376 | llvm::Intrinsic::ID IID = |
2352 | 376 | IsRetainRV ? llvm::Intrinsic::objc_retainAutoreleasedReturnValue
2353 | 376 | : llvm::Intrinsic::objc_unsafeClaimAutoreleasedReturnValue;
2354 | 376 | EP = getARCIntrinsic(IID, CGF.CGM); |
2355 | | |
2356 | 376 | llvm::Triple::ArchType Arch = CGF.CGM.getTriple().getArch(); |
2357 | | |
2358 | | // FIXME: Do this on all targets and at -O0 too. This can be enabled only if |
2359 | | // the target backend knows how to handle the operand bundle. |
2360 | 376 | if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0 && |
2361 | 376 | (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::x86_64)) {
2362 | 129 | llvm::Value *bundleArgs[] = {EP}; |
2363 | 129 | llvm::OperandBundleDef OB("clang.arc.attachedcall", bundleArgs); |
2364 | 129 | auto *oldCall = cast<llvm::CallBase>(value); |
2365 | 129 | llvm::CallBase *newCall = llvm::CallBase::addOperandBundle( |
2366 | 129 | oldCall, llvm::LLVMContext::OB_clang_arc_attachedcall, OB, oldCall); |
2367 | 129 | newCall->copyMetadata(*oldCall); |
2368 | 129 | oldCall->replaceAllUsesWith(newCall); |
2369 | 129 | oldCall->eraseFromParent(); |
2370 | 129 | CGF.EmitARCNoopIntrinsicUse(newCall); |
2371 | 129 | return newCall; |
2372 | 129 | } |
2373 | | |
2374 | 247 | bool isNoTail = |
2375 | 247 | CGF.CGM.getTargetCodeGenInfo().markARCOptimizedReturnCallsAsNoTail(); |
2376 | 247 | llvm::CallInst::TailCallKind tailKind = |
2377 | 247 | isNoTail ? llvm::CallInst::TCK_NoTail : llvm::CallInst::TCK_None;
2378 | 247 | return emitARCValueOperation(CGF, value, nullptr, EP, IID, tailKind); |
2379 | 376 | } |
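 | |
 | | // For illustration, the bundle form rewrites
 | | //   %ret = call i8* @foo()
 | | // into roughly
 | | //   %ret = call i8* @foo() [ "clang.arc.attachedcall"(
 | | //              i8* (i8*)* @llvm.objc.retainAutoreleasedReturnValue) ]
 | | //   call void (...) @llvm.objc.clang.arc.noop.use(i8* %ret)
 | | // so no separate retainRV/claimRV call appears in the IR.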
2380 | | |
2381 | | /// Retain the given object which is the result of a function call. |
2382 | | /// call i8* \@objc_retainAutoreleasedReturnValue(i8* %value) |
2383 | | /// |
2384 | | /// Yes, this function name is one character away from a different |
2385 | | /// call with completely different semantics. |
2386 | | llvm::Value * |
2387 | 328 | CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) { |
2388 | 328 | return emitOptimizedARCReturnCall(value, true, *this); |
2389 | 328 | } |
2390 | | |
2391 | | /// Claim a possibly-autoreleased return value at +0. This is only |
2392 | | /// valid to do in contexts which do not rely on the retain to keep |
2393 | | /// the object valid for all of its uses; for example, when |
2394 | | /// the value is ignored, or when it is being assigned to an |
2395 | | /// __unsafe_unretained variable. |
2396 | | /// |
2397 | | /// call i8* \@objc_unsafeClaimAutoreleasedReturnValue(i8* %value) |
2398 | | llvm::Value * |
2399 | 48 | CodeGenFunction::EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value) { |
2400 | 48 | return emitOptimizedARCReturnCall(value, false, *this); |
2401 | 48 | } |
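The two entry points differ only in the ownership the caller receives: retainRV yields the value at +1 and must be balanced by a release, while claimRV yields it at +0, kept alive only by the autorelease pool. A sketch in terms of the runtime's C symbols (makeObject is a hypothetical autoreleased producer; id is spelled void* to keep the sketch plain C++):

    extern "C" void *objc_retainAutoreleasedReturnValue(void *);      // +1
    extern "C" void *objc_unsafeClaimAutoreleasedReturnValue(void *); // +0
    extern "C" void objc_release(void *);
    extern "C" void *makeObject(void); // hypothetical autoreleased producer

    void demo(void) {
      // __strong use: the caller takes ownership and must release later.
      void *strong = objc_retainAutoreleasedReturnValue(makeObject());
      objc_release(strong);

      // __unsafe_unretained use: no ownership taken; the object is only
      // guaranteed to survive until the enclosing pool drains.
      void *unsafe = objc_unsafeClaimAutoreleasedReturnValue(makeObject());
      (void)unsafe;
    }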
2402 | | |
2403 | | /// Release the given object. |
2404 | | /// call void \@objc_release(i8* %value) |
2405 | | void CodeGenFunction::EmitARCRelease(llvm::Value *value, |
2406 | 742 | ARCPreciseLifetime_t precise) { |
2407 | 742 | if (isa<llvm::ConstantPointerNull>(value)) return;
2408 | | |
2409 | 742 | llvm::Function *&fn = CGM.getObjCEntrypoints().objc_release; |
2410 | 742 | if (!fn) |
2411 | 76 | fn = getARCIntrinsic(llvm::Intrinsic::objc_release, CGM); |
2412 | | |
2413 | | // Cast the argument to 'id'. |
2414 | 742 | value = Builder.CreateBitCast(value, Int8PtrTy); |
2415 | | |
2416 | | // Call objc_release. |
2417 | 742 | llvm::CallInst *call = EmitNounwindRuntimeCall(fn, value); |
2418 | | |
2419 | 742 | if (precise == ARCImpreciseLifetime) { |
2420 | 701 | call->setMetadata("clang.imprecise_release", |
2421 | 701 | llvm::MDNode::get(Builder.getContext(), None)); |
2422 | 701 | } |
2423 | 742 | } |
2424 | | |
2425 | | /// Destroy a __strong variable. |
2426 | | /// |
2427 | | /// At -O0, emit a call to store 'null' into the address; |
2428 | | /// instrumenting tools prefer this because the address is exposed, |
2429 | | /// but it's relatively cumbersome to optimize. |
2430 | | /// |
2431 | | /// At -O1 and above, just load and call objc_release. |
2432 | | /// |
2433 | | /// call void \@objc_storeStrong(i8** %addr, i8* null) |
2434 | | void CodeGenFunction::EmitARCDestroyStrong(Address addr, |
2435 | 1.30k | ARCPreciseLifetime_t precise) { |
2436 | 1.30k | if (CGM.getCodeGenOpts().OptimizationLevel == 0) { |
2437 | 991 | llvm::Value *null = getNullForVariable(addr); |
2438 | 991 | EmitARCStoreStrongCall(addr, null, /*ignored*/ true); |
2439 | 991 | return; |
2440 | 991 | } |
2441 | | |
2442 | 311 | llvm::Value *value = Builder.CreateLoad(addr); |
2443 | 311 | EmitARCRelease(value, precise); |
2444 | 311 | } |
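Both lowerings described above leave the object released; they differ only in whether the write to the variable's slot remains visible. A sketch of the two strategies against the runtime's C entry points (void* standing in for id):

    extern "C" void objc_storeStrong(void **addr, void *value);
    extern "C" void objc_release(void *value);

    // -O0: store null through objc_storeStrong, so the write to the slot
    // stays visible to instrumenting tools watching the address.
    void destroyStrongAtO0(void **addr) { objc_storeStrong(addr, nullptr); }

    // -O1+: just load and release; the dead store is omitted.
    void destroyStrongOptimized(void **addr) { objc_release(*addr); }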
2445 | | |
2446 | | /// Store into a strong object. Always calls this: |
2447 | | /// call void \@objc_storeStrong(i8** %addr, i8* %value) |
2448 | | llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(Address addr, |
2449 | | llvm::Value *value, |
2450 | 1.46k | bool ignored) { |
2451 | 1.46k | assert(addr.getElementType() == value->getType()); |
2452 | | |
2453 | 0 | llvm::Function *&fn = CGM.getObjCEntrypoints().objc_storeStrong; |
2454 | 1.46k | if (!fn) |
2455 | 119 | fn = getARCIntrinsic(llvm::Intrinsic::objc_storeStrong, CGM); |
2456 | | |
2457 | 1.46k | llvm::Value *args[] = { |
2458 | 1.46k | Builder.CreateBitCast(addr.getPointer(), Int8PtrPtrTy), |
2459 | 1.46k | Builder.CreateBitCast(value, Int8PtrTy) |
2460 | 1.46k | }; |
2461 | 1.46k | EmitNounwindRuntimeCall(fn, args); |
2462 | | |
2463 | 1.46k | if (ignored) return nullptr;
2464 | 19 | return value; |
2465 | 1.46k | } |
2466 | | |
2467 | | /// Store into a strong object. Sometimes calls this: |
2468 | | /// call void \@objc_storeStrong(i8** %addr, i8* %value) |
2469 | | /// Other times, breaks it down into components. |
2470 | | llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst, |
2471 | | llvm::Value *newValue, |
2472 | 111 | bool ignored) { |
2473 | 111 | QualType type = dst.getType(); |
2474 | 111 | bool isBlock = type->isBlockPointerType(); |
2475 | | |
2476 | | // Use a store barrier at -O0 unless this is a block type or the |
2477 | | // lvalue is inadequately aligned. |
2478 | 111 | if (shouldUseFusedARCCalls() && |
2479 | 111 | !isBlock101 && |
2480 | 111 | (99 dst.getAlignment().isZero()99 || |
2481 | 99 | dst.getAlignment() >= CharUnits::fromQuantity(PointerAlignInBytes))) { |
2482 | 99 | return EmitARCStoreStrongCall(dst.getAddress(*this), newValue, ignored); |
2483 | 99 | } |
2484 | | |
2485 | | // Otherwise, split it out. |
2486 | | |
2487 | | // Retain the new value. |
2488 | 12 | newValue = EmitARCRetain(type, newValue); |
2489 | | |
2490 | | // Read the old value. |
2491 | 12 | llvm::Value *oldValue = EmitLoadOfScalar(dst, SourceLocation()); |
2492 | | |
2493 | | // Store. We do this before the release so that any deallocs won't |
2494 | | // see the old value. |
2495 | 12 | EmitStoreOfScalar(newValue, dst); |
2496 | | |
2497 | | // Finally, release the old value. |
2498 | 12 | EmitARCRelease(oldValue, dst.isARCPreciseLifetime()); |
2499 | | |
2500 | 12 | return newValue; |
2501 | 111 | } |
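The order in the split path is retain new, load old, store new, release old: releasing first could run a -dealloc that re-reads this l-value and sees a dangling pointer. A sketch of the decomposition using the runtime's C symbols:

    extern "C" void *objc_retain(void *);
    extern "C" void objc_release(void *);

    // The retain/load/store/release sequence emitted above, spelled out.
    void storeStrongByHand(void **addr, void *newValue) {
      newValue = objc_retain(newValue); // retain the incoming value first
      void *oldValue = *addr;           // read the old value
      *addr = newValue;                 // store before releasing, so a
      objc_release(oldValue);           // dealloc never sees the old value
    }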
2502 | | |
2503 | | /// Autorelease the given object. |
2504 | | /// call i8* \@objc_autorelease(i8* %value) |
2505 | 11 | llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) { |
2506 | 11 | return emitARCValueOperation(*this, value, nullptr, |
2507 | 11 | CGM.getObjCEntrypoints().objc_autorelease, |
2508 | 11 | llvm::Intrinsic::objc_autorelease); |
2509 | 11 | } |
2510 | | |
2511 | | /// Autorelease the given object. |
2512 | | /// call i8* \@objc_autoreleaseReturnValue(i8* %value) |
2513 | | llvm::Value * |
2514 | 91 | CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) { |
2515 | 91 | return emitARCValueOperation(*this, value, nullptr, |
2516 | 91 | CGM.getObjCEntrypoints().objc_autoreleaseReturnValue, |
2517 | 91 | llvm::Intrinsic::objc_autoreleaseReturnValue, |
2518 | 91 | llvm::CallInst::TCK_Tail); |
2519 | 91 | } |
2520 | | |
2521 | | /// Do a fused retain/autorelease of the given object. |
2522 | | /// call i8* \@objc_retainAutoreleaseReturnValue(i8* %value) |
2523 | | llvm::Value * |
2524 | 8 | CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) { |
2525 | 8 | return emitARCValueOperation(*this, value, nullptr, |
2526 | 8 | CGM.getObjCEntrypoints().objc_retainAutoreleaseReturnValue, |
2527 | 8 | llvm::Intrinsic::objc_retainAutoreleaseReturnValue, |
2528 | 8 | llvm::CallInst::TCK_Tail); |
2529 | 8 | } |
2530 | | |
2531 | | /// Do a fused retain/autorelease of the given object. |
2532 | | /// call i8* \@objc_retainAutorelease(i8* %value) |
2533 | | /// or |
2534 | | /// %retain = call i8* \@objc_retainBlock(i8* %value) |
2535 | | /// call i8* \@objc_autorelease(i8* %retain) |
2536 | | llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type, |
2537 | 14 | llvm::Value *value) { |
2538 | 14 | if (!type->isBlockPointerType()) |
2539 | 10 | return EmitARCRetainAutoreleaseNonBlock(value); |
2540 | | |
2541 | 4 | if (isa<llvm::ConstantPointerNull>(value)) return value;
2542 | | |
2543 | 4 | llvm::Type *origType = value->getType(); |
2544 | 4 | value = Builder.CreateBitCast(value, Int8PtrTy); |
2545 | 4 | value = EmitARCRetainBlock(value, /*mandatory*/ true); |
2546 | 4 | value = EmitARCAutorelease(value); |
2547 | 4 | return Builder.CreateBitCast(value, origType); |
2548 | 4 | } |
2549 | | |
2550 | | /// Do a fused retain/autorelease of the given object. |
2551 | | /// call i8* \@objc_retainAutorelease(i8* %value) |
2552 | | llvm::Value * |
2553 | 10 | CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) { |
2554 | 10 | return emitARCValueOperation(*this, value, nullptr, |
2555 | 10 | CGM.getObjCEntrypoints().objc_retainAutorelease, |
2556 | 10 | llvm::Intrinsic::objc_retainAutorelease); |
2557 | 10 | } |
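The fused entry point is behaviorally a retain followed by an autorelease, collapsed into one runtime call. The equivalence, as a sketch:

    extern "C" void *objc_retain(void *);
    extern "C" void *objc_autorelease(void *);

    // What objc_retainAutorelease(value) computes.
    void *retainAutoreleaseByHand(void *value) {
      return objc_autorelease(objc_retain(value));
    }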
2558 | | |
2559 | | /// i8* \@objc_loadWeak(i8** %addr) |
2560 | | /// Essentially objc_autorelease(objc_loadWeakRetained(addr)). |
2561 | 16 | llvm::Value *CodeGenFunction::EmitARCLoadWeak(Address addr) { |
2562 | 16 | return emitARCLoadOperation(*this, addr, |
2563 | 16 | CGM.getObjCEntrypoints().objc_loadWeak, |
2564 | 16 | llvm::Intrinsic::objc_loadWeak); |
2565 | 16 | } |
2566 | | |
2567 | | /// i8* \@objc_loadWeakRetained(i8** %addr) |
2568 | 176 | llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(Address addr) { |
2569 | 176 | return emitARCLoadOperation(*this, addr, |
2570 | 176 | CGM.getObjCEntrypoints().objc_loadWeakRetained, |
2571 | 176 | llvm::Intrinsic::objc_loadWeakRetained); |
2572 | 176 | } |
2573 | | |
2574 | | /// i8* \@objc_storeWeak(i8** %addr, i8* %value) |
2575 | | /// Returns %value. |
2576 | | llvm::Value *CodeGenFunction::EmitARCStoreWeak(Address addr, |
2577 | | llvm::Value *value, |
2578 | 51 | bool ignored) { |
2579 | 51 | return emitARCStoreOperation(*this, addr, value, |
2580 | 51 | CGM.getObjCEntrypoints().objc_storeWeak, |
2581 | 51 | llvm::Intrinsic::objc_storeWeak, ignored); |
2582 | 51 | } |
2583 | | |
2584 | | /// i8* \@objc_initWeak(i8** %addr, i8* %value) |
2585 | | /// Returns %value. %addr is known to not have a current weak entry. |
2586 | | /// Essentially equivalent to: |
2587 | | /// *addr = nil; objc_storeWeak(addr, value); |
2588 | 187 | void CodeGenFunction::EmitARCInitWeak(Address addr, llvm::Value *value) { |
2589 | | // If we're initializing to null, just write null to memory; no need |
2590 | | // to get the runtime involved. But don't do this if optimization |
2591 | | // is enabled, because accounting for this would make the optimizer |
2592 | | // much more complicated. |
2593 | 187 | if (isa<llvm::ConstantPointerNull>(value) && |
2594 | 187 | CGM.getCodeGenOpts().OptimizationLevel == 0) {
2595 | 137 | Builder.CreateStore(value, addr); |
2596 | 137 | return; |
2597 | 137 | } |
2598 | | |
2599 | 50 | emitARCStoreOperation(*this, addr, value, |
2600 | 50 | CGM.getObjCEntrypoints().objc_initWeak, |
2601 | 50 | llvm::Intrinsic::objc_initWeak, /*ignored*/ true); |
2602 | 50 | } |
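objc_initWeak may assume the slot holds no weak entry, whereas objc_storeWeak must first unregister whatever is already there; that is also why a nil initialization can bypass the runtime entirely at -O0. The documented equivalence, as a sketch:

    extern "C" void *objc_initWeak(void **addr, void *value);
    extern "C" void *objc_storeWeak(void **addr, void *value);

    // Semantically, initWeak is storeWeak over a freshly nulled slot.
    void *initWeakByHand(void **addr, void *value) {
      *addr = nullptr; // no existing weak entry to unregister
      return objc_storeWeak(addr, value);
    }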
2603 | | |
2604 | | /// void \@objc_destroyWeak(i8** %addr) |
2605 | | /// Essentially objc_storeWeak(addr, nil). |
2606 | 554 | void CodeGenFunction::EmitARCDestroyWeak(Address addr) { |
2607 | 554 | llvm::Function *&fn = CGM.getObjCEntrypoints().objc_destroyWeak; |
2608 | 554 | if (!fn) |
2609 | 61 | fn = getARCIntrinsic(llvm::Intrinsic::objc_destroyWeak, CGM); |
2610 | | |
2611 | | // Cast the argument to 'id*'. |
2612 | 554 | addr = Builder.CreateElementBitCast(addr, Int8PtrTy); |
2613 | | |
2614 | 554 | EmitNounwindRuntimeCall(fn, addr.getPointer()); |
2615 | 554 | } |
2616 | | |
2617 | | /// void \@objc_moveWeak(i8** %dest, i8** %src) |
2618 | | /// Disregards the current value in %dest. Leaves %src pointing to nothing. |
2619 | | /// Essentially (objc_copyWeak(dest, src), objc_destroyWeak(src)). |
2620 | 15 | void CodeGenFunction::EmitARCMoveWeak(Address dst, Address src) { |
2621 | 15 | emitARCCopyOperation(*this, dst, src, |
2622 | 15 | CGM.getObjCEntrypoints().objc_moveWeak, |
2623 | 15 | llvm::Intrinsic::objc_moveWeak); |
2624 | 15 | } |
2625 | | |
2626 | | /// void \@objc_copyWeak(i8** %dest, i8** %src) |
2627 | | /// Disregards the current value in %dest. Essentially |
2628 | | /// objc_release(objc_initWeak(dest, objc_readWeakRetained(src))) |
2629 | 311 | void CodeGenFunction::EmitARCCopyWeak(Address dst, Address src) { |
2630 | 311 | emitARCCopyOperation(*this, dst, src, |
2631 | 311 | CGM.getObjCEntrypoints().objc_copyWeak, |
2632 | 311 | llvm::Intrinsic::objc_copyWeak); |
2633 | 311 | } |
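A sketch of the two equivalences stated in the comments above, in terms of the public weak-reference entry points (objc_loadWeakRetained is the current spelling of the objc_readWeakRetained named in the comment):

    extern "C" void *objc_initWeak(void **addr, void *value);
    extern "C" void *objc_loadWeakRetained(void **addr);
    extern "C" void objc_destroyWeak(void **addr);
    extern "C" void objc_copyWeak(void **dst, void **src);
    extern "C" void objc_release(void *value);

    // objc_copyWeak(dst, src): register dst on whatever src refers to.
    void copyWeakByHand(void **dst, void **src) {
      void *obj = objc_loadWeakRetained(src); // +1 keeps the object alive
      objc_initWeak(dst, obj);                // dst had no prior entry
      objc_release(obj);                      // drop the temporary +1
    }

    // objc_moveWeak(dst, src): copy, then tear down the source entry.
    void moveWeakByHand(void **dst, void **src) {
      objc_copyWeak(dst, src);
      objc_destroyWeak(src); // leaves *src registered to nothing
    }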
2634 | | |
2635 | | void CodeGenFunction::emitARCCopyAssignWeak(QualType Ty, Address DstAddr, |
2636 | 4 | Address SrcAddr) { |
2637 | 4 | llvm::Value *Object = EmitARCLoadWeakRetained(SrcAddr); |
2638 | 4 | Object = EmitObjCConsumeObject(Ty, Object); |
2639 | 4 | EmitARCStoreWeak(DstAddr, Object, false); |
2640 | 4 | } |
2641 | | |
2642 | | void CodeGenFunction::emitARCMoveAssignWeak(QualType Ty, Address DstAddr, |
2643 | 4 | Address SrcAddr) { |
2644 | 4 | llvm::Value *Object = EmitARCLoadWeakRetained(SrcAddr); |
2645 | 4 | Object = EmitObjCConsumeObject(Ty, Object); |
2646 | 4 | EmitARCStoreWeak(DstAddr, Object, false); |
2647 | 4 | EmitARCDestroyWeak(SrcAddr); |
2648 | 4 | } |
2649 | | |
2650 | | /// Produce the code to do a objc_autoreleasepool_push. |
2651 | | /// call i8* \@objc_autoreleasePoolPush(void) |
2652 | 100 | llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() { |
2653 | 100 | llvm::Function *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPush; |
2654 | 100 | if (!fn) |
2655 | 98 | fn = getARCIntrinsic(llvm::Intrinsic::objc_autoreleasePoolPush, CGM); |
2656 | | |
2657 | 100 | return EmitNounwindRuntimeCall(fn); |
2658 | 100 | } |
2659 | | |
2660 | | /// Produce the code to do a primitive release. |
2661 | | /// call void \@objc_autoreleasePoolPop(i8* %ptr) |
2662 | 100 | void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) { |
2663 | 100 | assert(value->getType() == Int8PtrTy); |
2664 | | |
2665 | 100 | if (getInvokeDest()) { |
2666 | | // Call the runtime method, not the intrinsic, if we are handling exceptions.
2667 | 2 | llvm::FunctionCallee &fn = |
2668 | 2 | CGM.getObjCEntrypoints().objc_autoreleasePoolPopInvoke; |
2669 | 2 | if (!fn) { |
2670 | 2 | llvm::FunctionType *fnType = |
2671 | 2 | llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false); |
2672 | 2 | fn = CGM.CreateRuntimeFunction(fnType, "objc_autoreleasePoolPop"); |
2673 | 2 | setARCRuntimeFunctionLinkage(CGM, fn); |
2674 | 2 | } |
2675 | | |
2676 | | // objc_autoreleasePoolPop can throw. |
2677 | 2 | EmitRuntimeCallOrInvoke(fn, value); |
2678 | 98 | } else { |
2679 | 98 | llvm::FunctionCallee &fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPop; |
2680 | 98 | if (!fn) |
2681 | 98 | fn = getARCIntrinsic(llvm::Intrinsic::objc_autoreleasePoolPop, CGM); |
2682 | | |
2683 | 98 | EmitRuntimeCall(fn, value); |
2684 | 98 | } |
2685 | 100 | } |
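Push returns an opaque token that must be handed back to the matching pop; everything autoreleased in between is drained at the pop. Ignoring the exceptional path handled above, an @autoreleasepool scope lowers to this pattern:

    extern "C" void *objc_autoreleasePoolPush(void);
    extern "C" void objc_autoreleasePoolPop(void *token);

    void autoreleaseScope(void (*body)(void)) {
      void *token = objc_autoreleasePoolPush(); // open the pool
      body();                                   // autoreleases collect here
      objc_autoreleasePoolPop(token);           // drain back to the token
    }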
2686 | | |
2687 | | /// Produce the code to do an MRR version of objc_autoreleasepool_push,
2688 | | /// which is: [[NSAutoreleasePool alloc] init];
2689 | | /// where alloc is declared as + (id)alloc; in the NSAutoreleasePool class
2690 | | /// and init is declared as - (id)init; in its NSObject superclass.
2691 | | /// |
2692 | 17 | llvm::Value *CodeGenFunction::EmitObjCMRRAutoreleasePoolPush() { |
2693 | 17 | CGObjCRuntime &Runtime = CGM.getObjCRuntime(); |
2694 | 17 | llvm::Value *Receiver = Runtime.EmitNSAutoreleasePoolClassRef(*this); |
2695 | | // [NSAutoreleasePool alloc] |
2696 | 17 | IdentifierInfo *II = &CGM.getContext().Idents.get("alloc"); |
2697 | 17 | Selector AllocSel = getContext().Selectors.getSelector(0, &II); |
2698 | 17 | CallArgList Args; |
2699 | 17 | RValue AllocRV = |
2700 | 17 | Runtime.GenerateMessageSend(*this, ReturnValueSlot(), |
2701 | 17 | getContext().getObjCIdType(), |
2702 | 17 | AllocSel, Receiver, Args); |
2703 | | |
2704 | | // [Receiver init] |
2705 | 17 | Receiver = AllocRV.getScalarVal(); |
2706 | 17 | II = &CGM.getContext().Idents.get("init"); |
2707 | 17 | Selector InitSel = getContext().Selectors.getSelector(0, &II); |
2708 | 17 | RValue InitRV = |
2709 | 17 | Runtime.GenerateMessageSend(*this, ReturnValueSlot(), |
2710 | 17 | getContext().getObjCIdType(), |
2711 | 17 | InitSel, Receiver, Args); |
2712 | 17 | return InitRV.getScalarVal(); |
2713 | 17 | } |
2714 | | |
2715 | | /// Allocate the given objc object. |
2716 | | /// call i8* \@objc_alloc(i8* %value) |
2717 | | llvm::Value *CodeGenFunction::EmitObjCAlloc(llvm::Value *value, |
2718 | 886 | llvm::Type *resultType) { |
2719 | 886 | return emitObjCValueOperation(*this, value, resultType, |
2720 | 886 | CGM.getObjCEntrypoints().objc_alloc, |
2721 | 886 | "objc_alloc"); |
2722 | 886 | } |
2723 | | |
2724 | | /// Allocate the given objc object. |
2725 | | /// call i8* \@objc_allocWithZone(i8* %value) |
2726 | | llvm::Value *CodeGenFunction::EmitObjCAllocWithZone(llvm::Value *value, |
2727 | 25 | llvm::Type *resultType) { |
2728 | 25 | return emitObjCValueOperation(*this, value, resultType, |
2729 | 25 | CGM.getObjCEntrypoints().objc_allocWithZone, |
2730 | 25 | "objc_allocWithZone"); |
2731 | 25 | } |
2732 | | |
2733 | | llvm::Value *CodeGenFunction::EmitObjCAllocInit(llvm::Value *value, |
2734 | 369 | llvm::Type *resultType) { |
2735 | 369 | return emitObjCValueOperation(*this, value, resultType, |
2736 | 369 | CGM.getObjCEntrypoints().objc_alloc_init, |
2737 | 369 | "objc_alloc_init"); |
2738 | 369 | } |
2739 | | |
2740 | | /// Produce the code to do an MRR autorelease-pool pop, which is:
2741 | | /// [tmp drain];
2742 | 17 | void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) { |
2743 | 17 | IdentifierInfo *II = &CGM.getContext().Idents.get("drain"); |
2744 | 17 | Selector DrainSel = getContext().Selectors.getSelector(0, &II); |
2745 | 17 | CallArgList Args; |
2746 | 17 | CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(), |
2747 | 17 | getContext().VoidTy, DrainSel, Arg, Args); |
2748 | 17 | } |
2749 | | |
2750 | | void CodeGenFunction::destroyARCStrongPrecise(CodeGenFunction &CGF, |
2751 | | Address addr, |
2752 | 53 | QualType type) { |
2753 | 53 | CGF.EmitARCDestroyStrong(addr, ARCPreciseLifetime); |
2754 | 53 | } |
2755 | | |
2756 | | void CodeGenFunction::destroyARCStrongImprecise(CodeGenFunction &CGF, |
2757 | | Address addr, |
2758 | 1.22k | QualType type) { |
2759 | 1.22k | CGF.EmitARCDestroyStrong(addr, ARCImpreciseLifetime); |
2760 | 1.22k | } |
2761 | | |
2762 | | void CodeGenFunction::destroyARCWeak(CodeGenFunction &CGF, |
2763 | | Address addr, |
2764 | 538 | QualType type) { |
2765 | 538 | CGF.EmitARCDestroyWeak(addr); |
2766 | 538 | } |
2767 | | |
2768 | | void CodeGenFunction::emitARCIntrinsicUse(CodeGenFunction &CGF, Address addr, |
2769 | 6 | QualType type) { |
2770 | 6 | llvm::Value *value = CGF.Builder.CreateLoad(addr); |
2771 | 6 | CGF.EmitARCIntrinsicUse(value); |
2772 | 6 | } |
2773 | | |
2774 | | /// Autorelease the given object. |
2775 | | /// call i8* \@objc_autorelease(i8* %value) |
2776 | | llvm::Value *CodeGenFunction::EmitObjCAutorelease(llvm::Value *value, |
2777 | 43 | llvm::Type *returnType) { |
2778 | 43 | return emitObjCValueOperation( |
2779 | 43 | *this, value, returnType, |
2780 | 43 | CGM.getObjCEntrypoints().objc_autoreleaseRuntimeFunction, |
2781 | 43 | "objc_autorelease"); |
2782 | 43 | } |
2783 | | |
2784 | | /// Retain the given object, with normal retain semantics. |
2785 | | /// call i8* \@objc_retain(i8* %value) |
2786 | | llvm::Value *CodeGenFunction::EmitObjCRetainNonBlock(llvm::Value *value, |
2787 | 17 | llvm::Type *returnType) { |
2788 | 17 | return emitObjCValueOperation( |
2789 | 17 | *this, value, returnType, |
2790 | 17 | CGM.getObjCEntrypoints().objc_retainRuntimeFunction, "objc_retain"); |
2791 | 17 | } |
2792 | | |
2793 | | /// Release the given object. |
2794 | | /// call void \@objc_release(i8* %value) |
2795 | | void CodeGenFunction::EmitObjCRelease(llvm::Value *value, |
2796 | 186 | ARCPreciseLifetime_t precise) { |
2797 | 186 | if (isa<llvm::ConstantPointerNull>(value)) return;
2798 | | |
2799 | 186 | llvm::FunctionCallee &fn = |
2800 | 186 | CGM.getObjCEntrypoints().objc_releaseRuntimeFunction; |
2801 | 186 | if (!fn) { |
2802 | 95 | llvm::FunctionType *fnType = |
2803 | 95 | llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false); |
2804 | 95 | fn = CGM.CreateRuntimeFunction(fnType, "objc_release"); |
2805 | 95 | setARCRuntimeFunctionLinkage(CGM, fn); |
2806 | | // We have native ARC, so set the nonlazybind attribute for performance.
2807 | 95 | if (llvm::Function *f = dyn_cast<llvm::Function>(fn.getCallee())) |
2808 | 95 | f->addFnAttr(llvm::Attribute::NonLazyBind); |
2809 | 95 | } |
2810 | | |
2811 | | // Cast the argument to 'id'. |
2812 | 186 | value = Builder.CreateBitCast(value, Int8PtrTy); |
2813 | | |
2814 | | // Call objc_release. |
2815 | 186 | llvm::CallBase *call = EmitCallOrInvoke(fn, value); |
2816 | | |
2817 | 186 | if (precise == ARCImpreciseLifetime) { |
2818 | 0 | call->setMetadata("clang.imprecise_release", |
2819 | 0 | llvm::MDNode::get(Builder.getContext(), None)); |
2820 | 0 | } |
2821 | 186 | } |
2822 | | |
2823 | | namespace { |
2824 | | struct CallObjCAutoreleasePoolObject final : EHScopeStack::Cleanup { |
2825 | | llvm::Value *Token; |
2826 | | |
2827 | 100 | CallObjCAutoreleasePoolObject(llvm::Value *token) : Token(token) {} |
2828 | | |
2829 | 100 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
2830 | 100 | CGF.EmitObjCAutoreleasePoolPop(Token); |
2831 | 100 | } |
2832 | | }; |
2833 | | struct CallObjCMRRAutoreleasePoolObject final : EHScopeStack::Cleanup { |
2834 | | llvm::Value *Token; |
2835 | | |
2836 | 17 | CallObjCMRRAutoreleasePoolObject(llvm::Value *token) : Token(token) {} |
2837 | | |
2838 | 17 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
2839 | 17 | CGF.EmitObjCMRRAutoreleasePoolPop(Token); |
2840 | 17 | } |
2841 | | }; |
2842 | | } |
2843 | | |
2844 | 4 | void CodeGenFunction::EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr) { |
2845 | 4 | if (CGM.getLangOpts().ObjCAutoRefCount) |
2846 | 4 | EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, Ptr); |
2847 | 0 | else |
2848 | 0 | EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, Ptr); |
2849 | 4 | } |
2850 | | |
2851 | 369 | static bool shouldRetainObjCLifetime(Qualifiers::ObjCLifetime lifetime) { |
2852 | 369 | switch (lifetime) { |
2853 | 4 | case Qualifiers::OCL_None: |
2854 | 7 | case Qualifiers::OCL_ExplicitNone: |
2855 | 344 | case Qualifiers::OCL_Strong: |
2856 | 344 | case Qualifiers::OCL_Autoreleasing: |
2857 | 344 | return true; |
2858 | | |
2859 | 25 | case Qualifiers::OCL_Weak: |
2860 | 25 | return false; |
2861 | 369 | } |
2862 | | |
2863 | 0 | llvm_unreachable("impossible lifetime!"); |
2864 | 0 | } |
2865 | | |
2866 | | static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF, |
2867 | | LValue lvalue, |
2868 | 367 | QualType type) { |
2869 | 367 | llvm::Value *result; |
2870 | 367 | bool shouldRetain = shouldRetainObjCLifetime(type.getObjCLifetime()); |
2871 | 367 | if (shouldRetain) { |
2872 | 343 | result = CGF.EmitLoadOfLValue(lvalue, SourceLocation()).getScalarVal(); |
2873 | 343 | } else { |
2874 | 24 | assert(type.getObjCLifetime() == Qualifiers::OCL_Weak); |
2875 | 0 | result = CGF.EmitARCLoadWeakRetained(lvalue.getAddress(CGF)); |
2876 | 24 | } |
2877 | 0 | return TryEmitResult(result, !shouldRetain); |
2878 | 367 | } |
2879 | | |
2880 | | static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF, |
2881 | 367 | const Expr *e) { |
2882 | 367 | e = e->IgnoreParens(); |
2883 | 367 | QualType type = e->getType(); |
2884 | | |
2885 | | // If we're loading retained from a __strong xvalue, we can avoid |
2886 | | // an extra retain/release pair by zeroing out the source of this |
2887 | | // "move" operation. |
2888 | 367 | if (e->isXValue() && |
2889 | 367 | !type.isConstQualified() &&
2890 | 367 | type.getObjCLifetime() == Qualifiers::OCL_Strong) {
2891 | | // Emit the lvalue. |
2892 | 3 | LValue lv = CGF.EmitLValue(e); |
2893 | | |
2894 | | // Load the object pointer. |
2895 | 3 | llvm::Value *result = CGF.EmitLoadOfLValue(lv, |
2896 | 3 | SourceLocation()).getScalarVal(); |
2897 | | |
2898 | | // Set the source pointer to NULL. |
2899 | 3 | CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress(CGF)), lv); |
2900 | | |
2901 | 3 | return TryEmitResult(result, true); |
2902 | 3 | } |
2903 | | |
2904 | | // As a very special optimization, in ARC++, if the l-value is the |
2905 | | // result of a non-volatile assignment, do a simple retain of the |
2906 | | // result of the call to objc_storeWeak instead of reloading. |
2907 | 364 | if (CGF.getLangOpts().CPlusPlus && |
2908 | 364 | !type.isVolatileQualified() &&
2909 | 364 | type.getObjCLifetime() == Qualifiers::OCL_Weak &&
2910 | 364 | isa<BinaryOperator>(e) &&
2911 | 364 | cast<BinaryOperator>(e)->getOpcode() == BO_Assign)
2912 | 1 | return TryEmitResult(CGF.EmitScalarExpr(e), false); |
2913 | | |
2914 | | // Try to emit code for scalar constant instead of emitting LValue and |
2915 | | // loading it because we are not guaranteed to have an l-value. One of such |
2916 | | // cases is DeclRefExpr referencing non-odr-used constant-evaluated variable. |
2917 | 363 | if (const auto *decl_expr = dyn_cast<DeclRefExpr>(e)) { |
2918 | 325 | auto *DRE = const_cast<DeclRefExpr *>(decl_expr); |
2919 | 325 | if (CodeGenFunction::ConstantEmission constant = CGF.tryEmitAsConstant(DRE)) |
2920 | 2 | return TryEmitResult(CGF.emitScalarConstant(constant, DRE), |
2921 | 2 | !shouldRetainObjCLifetime(type.getObjCLifetime())); |
2922 | 325 | } |
2923 | | |
2924 | 361 | return tryEmitARCRetainLoadOfScalar(CGF, CGF.EmitLValue(e), type); |
2925 | 363 | } |
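The xvalue case above is a true move: the pointer is loaded, the source is nulled, and the result is reported as already +1, so no retain/release pair is needed. In runtime terms:

    // What the xvalue path emits for `dst = std::move(strongSrc)` in ARC++:
    // ownership transfers without touching the reference count.
    void *moveStrongByHand(void **src) {
      void *result = *src; // take the +1 the source was holding
      *src = nullptr;      // the source no longer owns anything
      return result;       // the caller now owns the +1
    }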
2926 | | |
2927 | | typedef llvm::function_ref<llvm::Value *(CodeGenFunction &CGF, |
2928 | | llvm::Value *value)> |
2929 | | ValueTransform; |
2930 | | |
2931 | | /// Insert code immediately after a call. |
2932 | | |
2933 | | // FIXME: We should find a way to emit the runtime call immediately |
2934 | | // after the call is emitted to eliminate the need for this function. |
2935 | | static llvm::Value *emitARCOperationAfterCall(CodeGenFunction &CGF, |
2936 | | llvm::Value *value, |
2937 | | ValueTransform doAfterCall, |
2938 | 388 | ValueTransform doFallback) { |
2939 | 388 | CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP(); |
2940 | 388 | auto *callBase = dyn_cast<llvm::CallBase>(value); |
2941 | | |
2942 | 388 | if (callBase && llvm::objcarc::hasAttachedCallOpBundle(callBase)) {
2943 | | // Fall back if the call base has operand bundle "clang.arc.attachedcall". |
2944 | 1 | value = doFallback(CGF, value); |
2945 | 387 | } else if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(value)) { |
2946 | | // Place the retain immediately following the call. |
2947 | 360 | CGF.Builder.SetInsertPoint(call->getParent(), |
2948 | 360 | ++llvm::BasicBlock::iterator(call)); |
2949 | 360 | value = doAfterCall(CGF, value); |
2950 | 360 | } else if (llvm::InvokeInst *invoke = dyn_cast<llvm::InvokeInst>(value)) {
2951 | | // Place the retain at the beginning of the normal destination block. |
2952 | 14 | llvm::BasicBlock *BB = invoke->getNormalDest(); |
2953 | 14 | CGF.Builder.SetInsertPoint(BB, BB->begin()); |
2954 | 14 | value = doAfterCall(CGF, value); |
2955 | | |
2956 | | // Bitcasts can arise because of related-result returns. Rewrite |
2957 | | // the operand. |
2958 | 14 | } else if (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(value)) {
2959 | | // Change the insert point to avoid emitting the fall-back call after the |
2960 | | // bitcast. |
2961 | 10 | CGF.Builder.SetInsertPoint(bitcast->getParent(), bitcast->getIterator()); |
2962 | 10 | llvm::Value *operand = bitcast->getOperand(0); |
2963 | 10 | operand = emitARCOperationAfterCall(CGF, operand, doAfterCall, doFallback); |
2964 | 10 | bitcast->setOperand(0, operand); |
2965 | 10 | value = bitcast; |
2966 | 10 | } else { |
2967 | 3 | auto *phi = dyn_cast<llvm::PHINode>(value); |
2968 | 3 | if (phi && phi->getNumIncomingValues() == 2 &&
2969 | 3 | isa<llvm::ConstantPointerNull>(phi->getIncomingValue(1)) &&
2970 | 3 | isa<llvm::CallBase>(phi->getIncomingValue(0))) {
2971 | | // Handle phi instructions that are generated when it's necessary to check |
2972 | | // whether the receiver of a message is null. |
2973 | 2 | llvm::Value *inVal = phi->getIncomingValue(0); |
2974 | 2 | inVal = emitARCOperationAfterCall(CGF, inVal, doAfterCall, doFallback); |
2975 | 2 | phi->setIncomingValue(0, inVal); |
2976 | 2 | value = phi; |
2977 | 2 | } else { |
2978 | | // Generic fall-back case. |
2979 | | // Retain using the non-block variant: we never need to do a copy |
2980 | | // of a block that's been returned to us. |
2981 | 1 | value = doFallback(CGF, value); |
2982 | 1 | } |
2983 | 3 | } |
2984 | | |
2985 | 388 | CGF.Builder.restoreIP(ip); |
2986 | 388 | return value; |
2987 | 388 | } |
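Every branch above boils down to repositioning the builder just past the instruction that produced the value. For the plain-call case, that repositioning in isolation is:

    #include "llvm/IR/IRBuilder.h"

    // Make `Builder` emit its next instruction immediately after `call`,
    // inside the same basic block.
    void setInsertPointAfter(llvm::IRBuilder<> &Builder, llvm::CallInst *call) {
      Builder.SetInsertPoint(call->getParent(),
                             ++llvm::BasicBlock::iterator(call));
    }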
2988 | | |
2989 | | /// Given that the given expression is some sort of call (which does |
2990 | | /// not return retained), emit a retain following it. |
2991 | | static llvm::Value *emitARCRetainCallResult(CodeGenFunction &CGF, |
2992 | 328 | const Expr *e) { |
2993 | 328 | llvm::Value *value = CGF.EmitScalarExpr(e); |
2994 | 328 | return emitARCOperationAfterCall(CGF, value, |
2995 | 328 | [](CodeGenFunction &CGF, llvm::Value *value) { |
2996 | 326 | return CGF.EmitARCRetainAutoreleasedReturnValue(value); |
2997 | 326 | }, |
2998 | 328 | [](CodeGenFunction &CGF, llvm::Value *value) { |
2999 | 2 | return CGF.EmitARCRetainNonBlock(value); |
3000 | 2 | }); |
3001 | 328 | } |
3002 | | |
3003 | | /// Given that the given expression is some sort of call (which does |
3004 | | /// not return retained), perform an unsafeClaim following it. |
3005 | | static llvm::Value *emitARCUnsafeClaimCallResult(CodeGenFunction &CGF, |
3006 | 48 | const Expr *e) { |
3007 | 48 | llvm::Value *value = CGF.EmitScalarExpr(e); |
3008 | 48 | return emitARCOperationAfterCall(CGF, value, |
3009 | 48 | [](CodeGenFunction &CGF, llvm::Value *value) { |
3010 | 48 | return CGF.EmitARCUnsafeClaimAutoreleasedReturnValue(value); |
3011 | 48 | }, |
3012 | 48 | [](CodeGenFunction &CGF, llvm::Value *value) { |
3013 | 0 | return value; |
3014 | 0 | }); |
3015 | 48 | } |
3016 | | |
3017 | | llvm::Value *CodeGenFunction::EmitARCReclaimReturnedObject(const Expr *E, |
3018 | 151 | bool allowUnsafeClaim) { |
3019 | 151 | if (allowUnsafeClaim && |
3020 | 151 | CGM.getLangOpts().ObjCRuntime.hasARCUnsafeClaimAutoreleasedReturnValue()) {
3021 | 48 | return emitARCUnsafeClaimCallResult(*this, E); |
3022 | 103 | } else { |
3023 | 103 | llvm::Value *value = emitARCRetainCallResult(*this, E); |
3024 | 103 | return EmitObjCConsumeObject(E->getType(), value); |
3025 | 103 | } |
3026 | 151 | } |
3027 | | |
3028 | | /// Determine whether it might be important to emit a separate |
3029 | | /// objc_retain_block on the result of the given expression, or |
3030 | | /// whether it's okay to just emit it in a +1 context. |
3031 | 24 | static bool shouldEmitSeparateBlockRetain(const Expr *e) { |
3032 | 24 | assert(e->getType()->isBlockPointerType()); |
3033 | 0 | e = e->IgnoreParens(); |
3034 | | |
3035 | | // For future goodness, emit block expressions directly in +1 |
3036 | | // contexts if we can. |
3037 | 24 | if (isa<BlockExpr>(e)) |
3038 | 19 | return false; |
3039 | | |
3040 | 5 | if (const CastExpr *cast = dyn_cast<CastExpr>(e)) { |
3041 | 4 | switch (cast->getCastKind()) { |
3042 | | // Emitting these operations in +1 contexts is goodness. |
3043 | 4 | case CK_LValueToRValue: |
3044 | 4 | case CK_ARCReclaimReturnedObject: |
3045 | 4 | case CK_ARCConsumeObject: |
3046 | 4 | case CK_ARCProduceObject: |
3047 | 4 | return false; |
3048 | | |
3049 | | // These operations preserve a block type. |
3050 | 0 | case CK_NoOp: |
3051 | 0 | case CK_BitCast: |
3052 | 0 | return shouldEmitSeparateBlockRetain(cast->getSubExpr()); |
3053 | | |
3054 | | // These operations are known to be bad (or haven't been considered). |
3055 | 0 | case CK_AnyPointerToBlockPointerCast: |
3056 | 0 | default: |
3057 | 0 | return true; |
3058 | 4 | } |
3059 | 4 | } |
3060 | | |
3061 | 1 | return true; |
3062 | 5 | } |
3063 | | |
3064 | | namespace { |
3065 | | /// A CRTP base class for emitting expressions of retainable object |
3066 | | /// pointer type in ARC. |
3067 | | template <typename Impl, typename Result> class ARCExprEmitter { |
3068 | | protected: |
3069 | | CodeGenFunction &CGF; |
3070 | 3.01k | Impl &asImpl() { return *static_cast<Impl*>(this); }
  [instantiations: ARCRetainExprEmitter 2.50k, ARCUnsafeUnretainedExprEmitter 508]
3071 | | |
3072 | 1.32k | ARCExprEmitter(CodeGenFunction &CGF) : CGF(CGF) {}
  [instantiations: ARCRetainExprEmitter 1.16k, ARCUnsafeUnretainedExprEmitter 160]
3073 | | |
3074 | | public: |
3075 | | Result visit(const Expr *e); |
3076 | | Result visitCastExpr(const CastExpr *e); |
3077 | | Result visitPseudoObjectExpr(const PseudoObjectExpr *e); |
3078 | | Result visitBlockExpr(const BlockExpr *e); |
3079 | | Result visitBinaryOperator(const BinaryOperator *e); |
3080 | | Result visitBinAssign(const BinaryOperator *e); |
3081 | | Result visitBinAssignUnsafeUnretained(const BinaryOperator *e); |
3082 | | Result visitBinAssignAutoreleasing(const BinaryOperator *e); |
3083 | | Result visitBinAssignWeak(const BinaryOperator *e); |
3084 | | Result visitBinAssignStrong(const BinaryOperator *e); |
3085 | | |
3086 | | // Minimal implementation: |
3087 | | // Result visitLValueToRValue(const Expr *e) |
3088 | | // Result visitConsumeObject(const Expr *e) |
3089 | | // Result visitExtendBlockObject(const Expr *e) |
3090 | | // Result visitReclaimReturnedObject(const Expr *e) |
3091 | | // Result visitCall(const Expr *e) |
3092 | | // Result visitExpr(const Expr *e) |
3093 | | // |
3094 | | // Result emitBitCast(Result result, llvm::Type *resultType) |
3095 | | // llvm::Value *getValueOfResult(Result result) |
3096 | | }; |
3097 | | } |
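ARCExprEmitter is a CRTP base: the shared dispatch logic lives in the template, and asImpl() recovers the derived emitter so every callback resolves statically, with no vtable. A stripped-down sketch of the pattern with hypothetical names:

    // Minimal CRTP visitor mirroring ARCExprEmitter's structure.
    template <typename Impl, typename Result> class VisitorBase {
    protected:
      Impl &asImpl() { return *static_cast<Impl *>(this); }
    public:
      Result visit(bool simple) {
        // Shared dispatch in the base; leaf behavior in the derived class,
        // bound at compile time rather than through virtual calls.
        return simple ? asImpl().visitSimple() : asImpl().visitOther();
      }
    };

    struct CountingVisitor : VisitorBase<CountingVisitor, int> {
      int visitSimple() { return 1; }
      int visitOther() { return 2; }
    };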
3098 | | |
3099 | | /// Try to emit a PseudoObjectExpr under special ARC rules. |
3100 | | /// |
3101 | | /// This massively duplicates emitPseudoObjectRValue. |
3102 | | template <typename Impl, typename Result> |
3103 | | Result |
3104 | 10 | ARCExprEmitter<Impl,Result>::visitPseudoObjectExpr(const PseudoObjectExpr *E) { |
3105 | 10 | SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques; |
3106 | | |
3107 | | // Find the result expression. |
3108 | 10 | const Expr *resultExpr = E->getResultExpr(); |
3109 | 10 | assert(resultExpr); |
3110 | 0 | Result result; |
3111 | | |
3112 | 10 | for (PseudoObjectExpr::const_semantics_iterator |
3113 | 31 | i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
3114 | 21 | const Expr *semantic = *i; |
3115 | | |
3116 | | // If this semantic expression is an opaque value, bind it |
3117 | | // to the result of its source expression. |
3118 | 21 | if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) { |
3119 | 11 | typedef CodeGenFunction::OpaqueValueMappingData OVMA; |
3120 | 11 | OVMA opaqueData; |
3121 | | |
3122 | | // If this semantic is the result of the pseudo-object |
3123 | | // expression, try to evaluate the source as +1. |
3124 | 11 | if (ov == resultExpr) { |
3125 | 0 | assert(!OVMA::shouldBindAsLValue(ov)); |
3126 | 0 | result = asImpl().visit(ov->getSourceExpr()); |
3127 | 0 | opaqueData = OVMA::bind(CGF, ov, |
3128 | 0 | RValue::get(asImpl().getValueOfResult(result))); |
3129 | | |
3130 | | // Otherwise, just bind it. |
3131 | 11 | } else { |
3132 | 11 | opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr()); |
3133 | 11 | } |
3134 | 0 | opaques.push_back(opaqueData); |
3135 | | |
3136 | | // Otherwise, if the expression is the result, evaluate it |
3137 | | // and remember the result. |
3138 | 11 | } else if (semantic == resultExpr) {
3139 | 10 | result = asImpl().visit(semantic); |
3140 | | |
3141 | | // Otherwise, evaluate the expression in an ignored context. |
3142 | 10 | } else { |
3143 | 0 | CGF.EmitIgnoredExpr(semantic); |
3144 | 0 | } |
3145 | 21 | } |
3146 | | |
3147 | | // Unbind all the opaques now. |
3148 | 21 | for (unsigned i = 0, e = opaques.size(); i != e; ++i)
3149 | 11 | opaques[i].unbind(CGF); |
3150 | | |
3151 | 10 | return result; |
3152 | 10 | }
  [instantiations: ARCRetainExprEmitter 10, ARCUnsafeUnretainedExprEmitter unexecuted]
3153 | | |
3154 | | template <typename Impl, typename Result> |
3155 | 0 | Result ARCExprEmitter<Impl, Result>::visitBlockExpr(const BlockExpr *e) { |
3156 | | // The default implementation just forwards the expression to visitExpr. |
3157 | 0 | return asImpl().visitExpr(e); |
3158 | 0 | } |
3159 | | |
3160 | | template <typename Impl, typename Result> |
3161 | 1.01k | Result ARCExprEmitter<Impl,Result>::visitCastExpr(const CastExpr *e) { |
3162 | 1.01k | switch (e->getCastKind()) { |
3163 | | |
3164 | | // No-op casts don't change the type, so we just ignore them. |
3165 | 18 | case CK_NoOp: |
3166 | 18 | return asImpl().visit(e->getSubExpr()); |
3167 | | |
3168 | | // These casts can change the type. |
3169 | 11 | case CK_CPointerToObjCPointerCast: |
3170 | 24 | case CK_BlockPointerToObjCPointerCast: |
3171 | 25 | case CK_AnyPointerToBlockPointerCast: |
3172 | 151 | case CK_BitCast: { |
3173 | 151 | llvm::Type *resultType = CGF.ConvertType(e->getType()); |
3174 | 151 | assert(e->getSubExpr()->getType()->hasPointerRepresentation()); |
3175 | 0 | Result result = asImpl().visit(e->getSubExpr()); |
3176 | 151 | return asImpl().emitBitCast(result, resultType); |
3177 | 25 | } |
3178 | | |
3179 | | // Handle some casts specially. |
3180 | 383 | case CK_LValueToRValue: |
3181 | 383 | return asImpl().visitLValueToRValue(e->getSubExpr()); |
3182 | 86 | case CK_ARCConsumeObject: |
3183 | 86 | return asImpl().visitConsumeObject(e->getSubExpr()); |
3184 | 13 | case CK_ARCExtendBlockObject: |
3185 | 13 | return asImpl().visitExtendBlockObject(e->getSubExpr()); |
3186 | 258 | case CK_ARCReclaimReturnedObject: |
3187 | 258 | return asImpl().visitReclaimReturnedObject(e->getSubExpr()); |
3188 | | |
3189 | | // Otherwise, use the default logic. |
3190 | 102 | default: |
3191 | 102 | return asImpl().visitExpr(e); |
3192 | 1.01k | } |
3193 | 1.01k | }
  [instantiations: ARCRetainExprEmitter 905, ARCUnsafeUnretainedExprEmitter 106]
3194 | | |
3195 | | template <typename Impl, typename Result> |
3196 | | Result |
3197 | 76 | ARCExprEmitter<Impl,Result>::visitBinaryOperator(const BinaryOperator *e) { |
3198 | 76 | switch (e->getOpcode()) { |
3199 | 0 | case BO_Comma: |
3200 | 0 | CGF.EmitIgnoredExpr(e->getLHS()); |
3201 | 0 | CGF.EnsureInsertPoint(); |
3202 | 0 | return asImpl().visit(e->getRHS()); |
3203 | | |
3204 | 76 | case BO_Assign: |
3205 | 76 | return asImpl().visitBinAssign(e); |
3206 | | |
3207 | 0 | default: |
3208 | 0 | return asImpl().visitExpr(e); |
3209 | 76 | } |
3210 | 76 | }
  [instantiations: ARCRetainExprEmitter 36, ARCUnsafeUnretainedExprEmitter 40]
3211 | | |
3212 | | template <typename Impl, typename Result> |
3213 | 76 | Result ARCExprEmitter<Impl,Result>::visitBinAssign(const BinaryOperator *e) { |
3214 | 76 | switch (e->getLHS()->getType().getObjCLifetime()) { |
3215 | 40 | case Qualifiers::OCL_ExplicitNone: |
3216 | 40 | return asImpl().visitBinAssignUnsafeUnretained(e); |
3217 | | |
3218 | 8 | case Qualifiers::OCL_Weak: |
3219 | 8 | return asImpl().visitBinAssignWeak(e); |
3220 | | |
3221 | 4 | case Qualifiers::OCL_Autoreleasing: |
3222 | 4 | return asImpl().visitBinAssignAutoreleasing(e); |
3223 | | |
3224 | 24 | case Qualifiers::OCL_Strong: |
3225 | 24 | return asImpl().visitBinAssignStrong(e); |
3226 | | |
3227 | 0 | case Qualifiers::OCL_None: |
3228 | 0 | return asImpl().visitExpr(e); |
3229 | 76 | } |
3230 | 0 | llvm_unreachable("bad ObjC ownership qualifier"); |
3231 | 0 | }
  [instantiations: ARCRetainExprEmitter 36, ARCUnsafeUnretainedExprEmitter 40]
3232 | | |
3233 | | /// The default rule for __unsafe_unretained emits the RHS recursively, |
3234 | | /// stores into the unsafe variable, and propagates the result outward. |
3235 | | template <typename Impl, typename Result> |
3236 | | Result ARCExprEmitter<Impl,Result>:: |
3237 | 40 | visitBinAssignUnsafeUnretained(const BinaryOperator *e) { |
3238 | | // Recursively emit the RHS. |
3239 | | // For __block safety, do this before emitting the LHS. |
3240 | 40 | Result result = asImpl().visit(e->getRHS()); |
3241 | | |
3242 | | // Perform the store. |
3243 | 40 | LValue lvalue = |
3244 | 40 | CGF.EmitCheckedLValue(e->getLHS(), CodeGenFunction::TCK_Store); |
3245 | 40 | CGF.EmitStoreThroughLValue(RValue::get(asImpl().getValueOfResult(result)), |
3246 | 40 | lvalue); |
3247 | | |
3248 | 40 | return result; |
3249 | 40 | }
  [instantiations: ARCRetainExprEmitter 20, ARCUnsafeUnretainedExprEmitter 20]
3250 | | |
3251 | | template <typename Impl, typename Result> |
3252 | | Result |
3253 | 4 | ARCExprEmitter<Impl,Result>::visitBinAssignAutoreleasing(const BinaryOperator *e) { |
3254 | 4 | return asImpl().visitExpr(e); |
3255 | 4 | }
  [instantiations: ARCRetainExprEmitter 4, ARCUnsafeUnretainedExprEmitter unexecuted]
3256 | | |
3257 | | template <typename Impl, typename Result> |
3258 | | Result |
3259 | 8 | ARCExprEmitter<Impl,Result>::visitBinAssignWeak(const BinaryOperator *e) { |
3260 | 8 | return asImpl().visitExpr(e); |
3261 | 8 | }
  [instantiations: ARCRetainExprEmitter 8, ARCUnsafeUnretainedExprEmitter unexecuted]
3262 | | |
3263 | | template <typename Impl, typename Result> |
3264 | | Result |
3265 | 24 | ARCExprEmitter<Impl,Result>::visitBinAssignStrong(const BinaryOperator *e) { |
3266 | 24 | return asImpl().visitExpr(e); |
3267 | 24 | }
  [instantiations: ARCRetainExprEmitter 4, ARCUnsafeUnretainedExprEmitter 20]
3268 | | |
3269 | | /// The general expression-emission logic. |
3270 | | template <typename Impl, typename Result> |
3271 | 1.55k | Result ARCExprEmitter<Impl,Result>::visit(const Expr *e) { |
3272 | | // We should *never* see a nested full-expression here, because if |
3273 | | // we fail to emit at +1, our caller must not retain after we close |
3274 | | // out the full-expression. This isn't as important in the unsafe |
3275 | | // emitter. |
3276 | 1.55k | assert(!isa<ExprWithCleanups>(e)); |
3277 | | |
3278 | | // Look through parens, __extension__, generic selection, etc. |
3279 | 0 | e = e->IgnoreParens(); |
3280 | | |
3281 | | // Handle certain kinds of casts. |
3282 | 1.55k | if (const CastExpr *ce = dyn_cast<CastExpr>(e)) { |
3283 | 1.01k | return asImpl().visitCastExpr(ce); |
3284 | | |
3285 | | // Handle the comma operator. |
3286 | 1.01k | } else if (auto op = dyn_cast<BinaryOperator>(e)) {
3287 | 76 | return asImpl().visitBinaryOperator(op); |
3288 | | |
3289 | | // TODO: handle conditional operators here |
3290 | | |
3291 | | // For calls and message sends, use the retained-call logic. |
3292 | | // Delegate inits are a special case in that they're the only |
3293 | | // returns-retained expression that *isn't* surrounded by |
3294 | | // a consume. |
3295 | 470 | } else if (isa<CallExpr>(e) || |
3296 | 470 | (isa<ObjCMessageExpr>(e) &&
3297 | 460 | !cast<ObjCMessageExpr>(e)->isDelegateInitCall())) {
3298 | 14 | return asImpl().visitCall(e); |
3299 | | |
3300 | | // Look through pseudo-object expressions. |
3301 | 456 | } else if (const PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) { |
3302 | 10 | return asImpl().visitPseudoObjectExpr(pseudo); |
3303 | 446 | } else if (auto *be = dyn_cast<BlockExpr>(e)) |
3304 | 183 | return asImpl().visitBlockExpr(be); |
3305 | | |
3306 | 263 | return asImpl().visitExpr(e); |
3307 | 1.55k | }
  [instantiations: ARCRetainExprEmitter 1.33k, ARCUnsafeUnretainedExprEmitter 221]
3308 | | |
3309 | | namespace { |
3310 | | |
3311 | | /// An emitter for +1 results. |
3312 | | struct ARCRetainExprEmitter : |
3313 | | public ARCExprEmitter<ARCRetainExprEmitter, TryEmitResult> { |
3314 | | |
3315 | 1.16k | ARCRetainExprEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {} |
3316 | | |
3317 | 20 | llvm::Value *getValueOfResult(TryEmitResult result) { |
3318 | 20 | return result.getPointer(); |
3319 | 20 | } |
3320 | | |
3321 | 110 | TryEmitResult emitBitCast(TryEmitResult result, llvm::Type *resultType) { |
3322 | 110 | llvm::Value *value = result.getPointer(); |
3323 | 110 | value = CGF.Builder.CreateBitCast(value, resultType); |
3324 | 110 | result.setPointer(value); |
3325 | 110 | return result; |
3326 | 110 | } |
3327 | | |
3328 | 367 | TryEmitResult visitLValueToRValue(const Expr *e) { |
3329 | 367 | return tryEmitARCRetainLoadOfScalar(CGF, e); |
3330 | 367 | } |
3331 | | |
3332 | | /// For consumptions, just emit the subexpression and thus elide |
3333 | | /// the retain/release pair. |
3334 | 82 | TryEmitResult visitConsumeObject(const Expr *e) { |
3335 | 82 | llvm::Value *result = CGF.EmitScalarExpr(e); |
3336 | 82 | return TryEmitResult(result, true); |
3337 | 82 | } |
3338 | | |
3339 | 183 | TryEmitResult visitBlockExpr(const BlockExpr *e) { |
3340 | 183 | TryEmitResult result = visitExpr(e); |
3341 | | // Avoid the block-retain if this is a block literal that doesn't need to be |
3342 | | // copied to the heap. |
3343 | 183 | if (CGF.CGM.getCodeGenOpts().ObjCAvoidHeapifyLocalBlocks && |
3344 | 183 | e->getBlockDecl()->canAvoidCopyToHeap())
3345 | 13 | result.setInt(true); |
3346 | 183 | return result; |
3347 | 183 | } |
3348 | | |
3349 | | /// Block extends are net +0. Naively, we could just recurse on |
3350 | | /// the subexpression, but actually we need to ensure that the |
3351 | | /// value is copied as a block, so there's a little filter here. |
3352 | 13 | TryEmitResult visitExtendBlockObject(const Expr *e) { |
3353 | 13 | llvm::Value *result; // will be a +0 value |
3354 | | |
3355 | | // If we can't safely assume the sub-expression will produce a |
3356 | | // block-copied value, emit the sub-expression at +0. |
3357 | 13 | if (shouldEmitSeparateBlockRetain(e)) { |
3358 | 0 | result = CGF.EmitScalarExpr(e); |
3359 | | |
3360 | | // Otherwise, try to emit the sub-expression at +1 recursively. |
3361 | 13 | } else { |
3362 | 13 | TryEmitResult subresult = asImpl().visit(e); |
3363 | | |
3364 | | // If that produced a retained value, just use that. |
3365 | 13 | if (subresult.getInt()) { |
3366 | 0 | return subresult; |
3367 | 0 | } |
3368 | | |
3369 | | // Otherwise it's +0. |
3370 | 13 | result = subresult.getPointer(); |
3371 | 13 | } |
3372 | | |
3373 | | // Retain the object as a block. |
3374 | 13 | result = CGF.EmitARCRetainBlock(result, /*mandatory*/ true); |
3375 | 13 | return TryEmitResult(result, true); |
3376 | 13 | } |
3377 | | |
3378 | | /// For reclaims, emit the subexpression as a retained call and |
3379 | | /// skip the consumption. |
3380 | 215 | TryEmitResult visitReclaimReturnedObject(const Expr *e) { |
3381 | 215 | llvm::Value *result = emitARCRetainCallResult(CGF, e); |
3382 | 215 | return TryEmitResult(result, true); |
3383 | 215 | } |
3384 | | |
3385 | | /// When we have an undecorated call, retroactively do a claim. |
3386 | 10 | TryEmitResult visitCall(const Expr *e) { |
3387 | 10 | llvm::Value *result = emitARCRetainCallResult(CGF, e); |
3388 | 10 | return TryEmitResult(result, true); |
3389 | 10 | } |
3390 | | |
3391 | | // TODO: maybe special-case visitBinAssignWeak? |
3392 | | |
3393 | 491 | TryEmitResult visitExpr(const Expr *e) { |
3394 | | // We didn't find an obvious production, so emit what we've got and |
3395 | | // tell the caller that we didn't manage to retain. |
3396 | 491 | llvm::Value *result = CGF.EmitScalarExpr(e); |
3397 | 491 | return TryEmitResult(result, false); |
3398 | 491 | } |
3399 | | }; |
3400 | | } |
3401 | | |
3402 | | static TryEmitResult |
3403 | 1.16k | tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) { |
3404 | 1.16k | return ARCRetainExprEmitter(CGF).visit(e); |
3405 | 1.16k | } |
3406 | | |
3407 | | static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF, |
3408 | | LValue lvalue, |
3409 | 6 | QualType type) { |
3410 | 6 | TryEmitResult result = tryEmitARCRetainLoadOfScalar(CGF, lvalue, type); |
3411 | 6 | llvm::Value *value = result.getPointer(); |
3412 | 6 | if (!result.getInt()) |
3413 | 0 | value = CGF.EmitARCRetain(type, value); |
3414 | 6 | return value; |
3415 | 6 | } |
3416 | | |
3417 | | /// EmitARCRetainScalarExpr - Semantically equivalent to |
3418 | | /// EmitARCRetainObject(e->getType(), EmitScalarExpr(e)), but making a |
3419 | | /// best-effort attempt to peephole expressions that naturally produce |
3420 | | /// retained objects. |
3421 | 980 | llvm::Value *CodeGenFunction::EmitARCRetainScalarExpr(const Expr *e) { |
3422 | | // The retain needs to happen within the full-expression. |
3423 | 980 | if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) { |
3424 | 10 | RunCleanupsScope scope(*this); |
3425 | 10 | return EmitARCRetainScalarExpr(cleanups->getSubExpr()); |
3426 | 10 | } |
3427 | | |
3428 | 970 | TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e); |
3429 | 970 | llvm::Value *value = result.getPointer(); |
3430 | 970 | if (!result.getInt()) |
3431 | 687 | value = EmitARCRetain(e->getType(), value); |
3432 | 970 | return value; |
3433 | 980 | } |
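
A minimal sketch (plain C++, not clang API) of the contract this function relies on: tryEmitARCRetainScalarExpr returns a (value, already-retained) pair, and a retain is inserted only when the flag is false. All names below are hypothetical stand-ins.

    #include <utility>

    struct Value {};                            // stand-in for llvm::Value
    Value *emitRetain(Value *v) { return v; }   // stand-in for EmitARCRetain

    Value *emitAtPlusOne(std::pair<Value *, bool> tryResult) {
      Value *v = tryResult.first;
      if (!tryResult.second)   // value was emitted at +0: add the retain
        v = emitRetain(v);
      return v;                // guaranteed +1 for the caller
    }
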
3434 | | |
3435 | | llvm::Value * |
3436 | 16 | CodeGenFunction::EmitARCRetainAutoreleaseScalarExpr(const Expr *e) { |
3437 | | // The retain needs to happen within the full-expression. |
3438 | 16 | if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) { |
3439 | 1 | RunCleanupsScope scope(*this); |
3440 | 1 | return EmitARCRetainAutoreleaseScalarExpr(cleanups->getSubExpr()); |
3441 | 1 | } |
3442 | | |
3443 | 15 | TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e); |
3444 | 15 | llvm::Value *value = result.getPointer(); |
3445 | 15 | if (result.getInt()) |
3446 | 7 | value = EmitARCAutorelease(value); |
3447 | 8 | else |
3448 | 8 | value = EmitARCRetainAutorelease(e->getType(), value); |
3449 | 15 | return value; |
3450 | 16 | } |
3451 | | |
3452 | 11 | llvm::Value *CodeGenFunction::EmitARCExtendBlockObject(const Expr *e) { |
3453 | 11 | llvm::Value *result; |
3454 | 11 | bool doRetain; |
3455 | | |
3456 | 11 | if (shouldEmitSeparateBlockRetain(e)) { |
3457 | 1 | result = EmitScalarExpr(e); |
3458 | 1 | doRetain = true; |
3459 | 10 | } else { |
3460 | 10 | TryEmitResult subresult = tryEmitARCRetainScalarExpr(*this, e); |
3461 | 10 | result = subresult.getPointer(); |
3462 | 10 | doRetain = !subresult.getInt(); |
3463 | 10 | } |
3464 | | |
3465 | 11 | if (doRetain) |
3466 | 11 | result = EmitARCRetainBlock(result, /*mandatory*/ true); |
3467 | 11 | return EmitObjCConsumeObject(e->getType(), result); |
3468 | 11 | } |
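
A hedged sketch of the runtime effect: extending a block's lifetime pins it with objc_retainBlock (the real ARC entry point, which copies a stack block to the heap), then hands the result to the usual consume. The wrapper below is illustrative only.

    extern "C" void *objc_retainBlock(void *block);

    void *extendBlock(void *block) {
      return objc_retainBlock(block);  // mandatory: ensures a heap copy exists
    }
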
3469 | | |
3470 | 36 | llvm::Value *CodeGenFunction::EmitObjCThrowOperand(const Expr *expr) { |
3471 | | // In ARC, retain and autorelease the expression. |
3472 | 36 | if (getLangOpts().ObjCAutoRefCount) { |
3473 | | // Do so before running any cleanups for the full-expression. |
3474 | | // EmitARCRetainAutoreleaseScalarExpr does this for us. |
3475 | 1 | return EmitARCRetainAutoreleaseScalarExpr(expr); |
3476 | 1 | } |
3477 | | |
3478 | | // Otherwise, use the normal scalar-expression emission. The |
3479 | | // exception machinery doesn't do anything special with the |
3480 | | // exception like retaining it, so there's no safety associated with |
3481 | | // only running cleanups after the throw has started, and when it |
3482 | | // matters it tends to be substantially inferior code. |
3483 | 35 | return EmitScalarExpr(expr); |
3484 | 36 | } |
3485 | | |
3486 | | namespace { |
3487 | | |
3488 | | /// An emitter for assigning into an __unsafe_unretained context. |
3489 | | struct ARCUnsafeUnretainedExprEmitter : |
3490 | | public ARCExprEmitter<ARCUnsafeUnretainedExprEmitter, llvm::Value*> { |
3491 | | |
3492 | 160 | ARCUnsafeUnretainedExprEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {} |
3493 | | |
3494 | 20 | llvm::Value *getValueOfResult(llvm::Value *value) { |
3495 | 20 | return value; |
3496 | 20 | } |
3497 | | |
3498 | 41 | llvm::Value *emitBitCast(llvm::Value *value, llvm::Type *resultType) { |
3499 | 41 | return CGF.Builder.CreateBitCast(value, resultType); |
3500 | 41 | } |
3501 | | |
3502 | 16 | llvm::Value *visitLValueToRValue(const Expr *e) { |
3503 | 16 | return CGF.EmitScalarExpr(e); |
3504 | 16 | } |
3505 | | |
3506 | | /// For consumptions, just emit the subexpression and perform the |
3507 | | /// consumption like normal. |
3508 | 4 | llvm::Value *visitConsumeObject(const Expr *e) { |
3509 | 4 | llvm::Value *value = CGF.EmitScalarExpr(e); |
3510 | 4 | return CGF.EmitObjCConsumeObject(e->getType(), value); |
3511 | 4 | } |
3512 | | |
3513 | | /// No special logic for block extensions. (This probably can't |
3514 | | /// actually happen in this emitter, though.) |
3515 | 0 | llvm::Value *visitExtendBlockObject(const Expr *e) { |
3516 | 0 | return CGF.EmitARCExtendBlockObject(e); |
3517 | 0 | } |
3518 | | |
3519 | | /// For reclaims, perform an unsafeClaim if that's enabled. |
3520 | 43 | llvm::Value *visitReclaimReturnedObject(const Expr *e) { |
3521 | 43 | return CGF.EmitARCReclaimReturnedObject(e, /*unsafe*/ true); |
3522 | 43 | } |
3523 | | |
3524 | | /// When we have an undecorated call, just emit it without adding |
3525 | | /// the unsafeClaim. |
3526 | 4 | llvm::Value *visitCall(const Expr *e) { |
3527 | 4 | return CGF.EmitScalarExpr(e); |
3528 | 4 | } |
3529 | | |
3530 | | /// Just do normal scalar emission in the default case. |
3531 | 93 | llvm::Value *visitExpr(const Expr *e) { |
3532 | 93 | return CGF.EmitScalarExpr(e); |
3533 | 93 | } |
3534 | | }; |
3535 | | } |
3536 | | |
3537 | | static llvm::Value *emitARCUnsafeUnretainedScalarExpr(CodeGenFunction &CGF, |
3538 | 160 | const Expr *e) { |
3539 | 160 | return ARCUnsafeUnretainedExprEmitter(CGF).visit(e); |
3540 | 160 | } |
3541 | | |
3542 | | /// EmitARCUnsafeUnretainedScalarExpr - Semantically equivalent to |
3543 | | /// immediately releasing the result of EmitARCRetainScalarExpr, but
3544 | | /// avoiding any spurious retains, including by performing reclaims |
3545 | | /// with objc_unsafeClaimAutoreleasedReturnValue. |
3546 | 160 | llvm::Value *CodeGenFunction::EmitARCUnsafeUnretainedScalarExpr(const Expr *e) { |
3547 | | // Look through full-expressions. |
3548 | 160 | if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) { |
3549 | 0 | RunCleanupsScope scope(*this); |
3550 | 0 | return emitARCUnsafeUnretainedScalarExpr(*this, cleanups->getSubExpr()); |
3551 | 0 | } |
3552 | | |
3553 | 160 | return emitARCUnsafeUnretainedScalarExpr(*this, e); |
3554 | 160 | } |
3555 | | |
3556 | | std::pair<LValue,llvm::Value*> |
3557 | | CodeGenFunction::EmitARCStoreUnsafeUnretained(const BinaryOperator *e, |
3558 | 35 | bool ignored) { |
3559 | | // Evaluate the RHS first. If we're ignoring the result, assume |
3560 | | // that we can emit at an unsafe +0. |
3561 | 35 | llvm::Value *value; |
3562 | 35 | if (ignored) { |
3563 | 35 | value = EmitARCUnsafeUnretainedScalarExpr(e->getRHS()); |
3564 | 35 | } else { |
3565 | 0 | value = EmitScalarExpr(e->getRHS()); |
3566 | 0 | } |
3567 | | |
3568 | | // Emit the LHS and perform the store. |
3569 | 35 | LValue lvalue = EmitLValue(e->getLHS()); |
3570 | 35 | EmitStoreOfScalar(value, lvalue); |
3571 | | |
3572 | 35 | return std::pair<LValue,llvm::Value*>(std::move(lvalue), value); |
3573 | 35 | } |
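
A hedged sketch of the unsafe +0 path: the RHS is emitted without a retain and stored with a plain scalar store. When the RHS is a reclaimed call result, the real runtime entry point objc_unsafeClaimAutoreleasedReturnValue replaces the retain/autorelease handshake; the wrapper name is hypothetical.

    extern "C" void *objc_unsafeClaimAutoreleasedReturnValue(void *value);

    void storeUnsafeUnretained(void **slot, void *callResult) {
      void *v = objc_unsafeClaimAutoreleasedReturnValue(callResult); // net +0
      *slot = v;  // no retain, no release: the slot is unsafe-unretained
    }
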
3574 | | |
3575 | | std::pair<LValue,llvm::Value*> |
3576 | | CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e, |
3577 | 159 | bool ignored) { |
3578 | | // Evaluate the RHS first. |
3579 | 159 | TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e->getRHS()); |
3580 | 159 | llvm::Value *value = result.getPointer(); |
3581 | | |
3582 | 159 | bool hasImmediateRetain = result.getInt(); |
3583 | | |
3584 | | // If we didn't emit a retained object, and the l-value is of block |
3585 | | // type, then we need to emit the block-retain immediately in case |
3586 | | // it invalidates the l-value. |
3587 | 159 | if (!hasImmediateRetain && e->getType()->isBlockPointerType()) {
3588 | 24 | value = EmitARCRetainBlock(value, /*mandatory*/ false); |
3589 | 24 | hasImmediateRetain = true; |
3590 | 24 | } |
3591 | | |
3592 | 159 | LValue lvalue = EmitLValue(e->getLHS()); |
3593 | | |
3594 | | // If the RHS was emitted retained, expand this. |
3595 | 159 | if (hasImmediateRetain) { |
3596 | 78 | llvm::Value *oldValue = EmitLoadOfScalar(lvalue, SourceLocation()); |
3597 | 78 | EmitStoreOfScalar(value, lvalue); |
3598 | 78 | EmitARCRelease(oldValue, lvalue.isARCPreciseLifetime()); |
3599 | 81 | } else { |
3600 | 81 | value = EmitARCStoreStrong(lvalue, value, ignored); |
3601 | 81 | } |
3602 | | |
3603 | 159 | return std::pair<LValue,llvm::Value*>(lvalue, value); |
3604 | 159 | } |
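
A hedged sketch of the expanded strong store taken on the hasImmediateRetain path: the new value arrives already retained, the old value is loaded, the store happens, and only then is the old value released. objc_release is the real ARC entry point; everything else is illustrative.

    extern "C" void objc_release(void *value);

    void storeStrongExpanded(void **slot, void *retainedNewValue) {
      void *oldValue = *slot;        // load the previous strong value
      *slot = retainedNewValue;      // store the already-+1 new value
      objc_release(oldValue);        // release the old value after the store
    }
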
3605 | | |
3606 | | std::pair<LValue,llvm::Value*> |
3607 | 4 | CodeGenFunction::EmitARCStoreAutoreleasing(const BinaryOperator *e) { |
3608 | 4 | llvm::Value *value = EmitARCRetainAutoreleaseScalarExpr(e->getRHS()); |
3609 | 4 | LValue lvalue = EmitLValue(e->getLHS()); |
3610 | | |
3611 | 4 | EmitStoreOfScalar(value, lvalue); |
3612 | | |
3613 | 4 | return std::pair<LValue,llvm::Value*>(lvalue, value); |
3614 | 4 | } |
3615 | | |
3616 | | void CodeGenFunction::EmitObjCAutoreleasePoolStmt( |
3617 | 113 | const ObjCAutoreleasePoolStmt &ARPS) { |
3618 | 113 | const Stmt *subStmt = ARPS.getSubStmt(); |
3619 | 113 | const CompoundStmt &S = cast<CompoundStmt>(*subStmt); |
3620 | | |
3621 | 113 | CGDebugInfo *DI = getDebugInfo(); |
3622 | 113 | if (DI) |
3623 | 94 | DI->EmitLexicalBlockStart(Builder, S.getLBracLoc()); |
3624 | | |
3625 | | // Keep track of the current cleanup stack depth. |
3626 | 113 | RunCleanupsScope Scope(*this); |
3627 | 113 | if (CGM.getLangOpts().ObjCRuntime.hasNativeARC()) { |
3628 | 96 | llvm::Value *token = EmitObjCAutoreleasePoolPush(); |
3629 | 96 | EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, token); |
3630 | 96 | } else { |
3631 | 17 | llvm::Value *token = EmitObjCMRRAutoreleasePoolPush(); |
3632 | 17 | EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, token); |
3633 | 17 | } |
3634 | | |
3635 | 113 | for (const auto *I : S.body()) |
3636 | 530 | EmitStmt(I); |
3637 | | |
3638 | 113 | if (DI) |
3639 | 94 | DI->EmitLexicalBlockEnd(Builder, S.getRBracLoc()); |
3640 | 113 | } |
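
A hedged sketch of the native-ARC lowering of @autoreleasepool: push a pool token, run the body, and pop the token as a cleanup (in the real emission the pop is on the cleanup stack, so it also runs on exceptional exits). The two runtime entry points are real; the wrapper is illustrative.

    extern "C" void *objc_autoreleasePoolPush(void);
    extern "C" void objc_autoreleasePoolPop(void *token);

    void withAutoreleasePool(void (*body)(void)) {
      void *token = objc_autoreleasePoolPush();
      body();                          // statements of the @autoreleasepool block
      objc_autoreleasePoolPop(token);  // registered as a cleanup in real codegen
    }
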
3641 | | |
3642 | | /// EmitExtendGCLifetime - Given a pointer to an Objective-C object, |
3643 | | /// make sure it survives garbage collection until this point. |
3644 | 1 | void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) { |
3645 | | // We just use inline assembly.
3646 | 1 | llvm::FunctionType *extenderType |
3647 | 1 | = llvm::FunctionType::get(VoidTy, VoidPtrTy, RequiredArgs::All); |
3648 | 1 | llvm::InlineAsm *extender = llvm::InlineAsm::get(extenderType, |
3649 | 1 | /* assembly */ "", |
3650 | 1 | /* constraints */ "r", |
3651 | 1 | /* side effects */ true); |
3652 | | |
3653 | 1 | object = Builder.CreateBitCast(object, VoidPtrTy); |
3654 | 1 | EmitNounwindRuntimeCall(extender, object); |
3655 | 1 | } |
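
A hedged source-level equivalent of the keep-alive this function emits: an empty inline assembly statement that claims to read the pointer and to have side effects, forcing the object to stay live up to this point without generating any instructions.

    static inline void extendGCLifetime(void *object) {
      __asm__ __volatile__("" : : "r"(object));
    }
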
3656 | | |
3657 | | /// GenerateObjCAtomicSetterCopyHelperFunction - Given a C++ object type with
3658 | | /// a non-trivial copy assignment function, produce the following helper function.
3659 | | /// static void copyHelper(Ty *dest, const Ty *source) { *dest = *source; } |
3660 | | /// |
3661 | | llvm::Constant * |
3662 | | CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction( |
3663 | 442 | const ObjCPropertyImplDecl *PID) { |
3664 | 442 | if (!getLangOpts().CPlusPlus || |
3665 | 442 | !getLangOpts().ObjCRuntime.hasAtomicCopyHelper())
3666 | 410 | return nullptr; |
3667 | 32 | QualType Ty = PID->getPropertyIvarDecl()->getType(); |
3668 | 32 | if (!Ty->isRecordType()) |
3669 | 10 | return nullptr; |
3670 | 22 | const ObjCPropertyDecl *PD = PID->getPropertyDecl(); |
3671 | 22 | if ((!(PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_atomic))) |
3672 | 7 | return nullptr; |
3673 | 15 | llvm::Constant *HelperFn = nullptr; |
3674 | 15 | if (hasTrivialSetExpr(PID)) |
3675 | 8 | return nullptr; |
3676 | 7 | assert(PID->getSetterCXXAssignment() && "SetterCXXAssignment - null"); |
3677 | 7 | if ((HelperFn = CGM.getAtomicSetterHelperFnMap(Ty))) |
3678 | 2 | return HelperFn; |
3679 | | |
3680 | 5 | ASTContext &C = getContext(); |
3681 | 5 | IdentifierInfo *II |
3682 | 5 | = &CGM.getContext().Idents.get("__assign_helper_atomic_property_"); |
3683 | | |
3684 | 5 | QualType ReturnTy = C.VoidTy; |
3685 | 5 | QualType DestTy = C.getPointerType(Ty); |
3686 | 5 | QualType SrcTy = Ty; |
3687 | 5 | SrcTy.addConst(); |
3688 | 5 | SrcTy = C.getPointerType(SrcTy); |
3689 | | |
3690 | 5 | SmallVector<QualType, 2> ArgTys; |
3691 | 5 | ArgTys.push_back(DestTy); |
3692 | 5 | ArgTys.push_back(SrcTy); |
3693 | 5 | QualType FunctionTy = C.getFunctionType(ReturnTy, ArgTys, {}); |
3694 | | |
3695 | 5 | FunctionDecl *FD = FunctionDecl::Create( |
3696 | 5 | C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II, |
3697 | 5 | FunctionTy, nullptr, SC_Static, false, false, false); |
3698 | | |
3699 | 5 | FunctionArgList args; |
3700 | 5 | ParmVarDecl *Params[2]; |
3701 | 5 | ParmVarDecl *DstDecl = ParmVarDecl::Create( |
3702 | 5 | C, FD, SourceLocation(), SourceLocation(), nullptr, DestTy, |
3703 | 5 | C.getTrivialTypeSourceInfo(DestTy, SourceLocation()), SC_None, |
3704 | 5 | /*DefArg=*/nullptr); |
3705 | 5 | args.push_back(Params[0] = DstDecl); |
3706 | 5 | ParmVarDecl *SrcDecl = ParmVarDecl::Create( |
3707 | 5 | C, FD, SourceLocation(), SourceLocation(), nullptr, SrcTy, |
3708 | 5 | C.getTrivialTypeSourceInfo(SrcTy, SourceLocation()), SC_None, |
3709 | 5 | /*DefArg=*/nullptr); |
3710 | 5 | args.push_back(Params[1] = SrcDecl); |
3711 | 5 | FD->setParams(Params); |
3712 | | |
3713 | 5 | const CGFunctionInfo &FI = |
3714 | 5 | CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args); |
3715 | | |
3716 | 5 | llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI); |
3717 | | |
3718 | 5 | llvm::Function *Fn = |
3719 | 5 | llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage, |
3720 | 5 | "__assign_helper_atomic_property_", |
3721 | 5 | &CGM.getModule()); |
3722 | | |
3723 | 5 | CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI); |
3724 | | |
3725 | 5 | StartFunction(FD, ReturnTy, Fn, FI, args); |
3726 | | |
3727 | 5 | DeclRefExpr DstExpr(C, DstDecl, false, DestTy, VK_PRValue, SourceLocation()); |
3728 | 5 | UnaryOperator *DST = UnaryOperator::Create( |
3729 | 5 | C, &DstExpr, UO_Deref, DestTy->getPointeeType(), VK_LValue, OK_Ordinary, |
3730 | 5 | SourceLocation(), false, FPOptionsOverride()); |
3731 | | |
3732 | 5 | DeclRefExpr SrcExpr(C, SrcDecl, false, SrcTy, VK_PRValue, SourceLocation()); |
3733 | 5 | UnaryOperator *SRC = UnaryOperator::Create( |
3734 | 5 | C, &SrcExpr, UO_Deref, SrcTy->getPointeeType(), VK_LValue, OK_Ordinary, |
3735 | 5 | SourceLocation(), false, FPOptionsOverride()); |
3736 | | |
3737 | 5 | Expr *Args[2] = {DST, SRC}; |
3738 | 5 | CallExpr *CalleeExp = cast<CallExpr>(PID->getSetterCXXAssignment()); |
3739 | 5 | CXXOperatorCallExpr *TheCall = CXXOperatorCallExpr::Create( |
3740 | 5 | C, OO_Equal, CalleeExp->getCallee(), Args, DestTy->getPointeeType(), |
3741 | 5 | VK_LValue, SourceLocation(), FPOptionsOverride()); |
3742 | | |
3743 | 5 | EmitStmt(TheCall); |
3744 | | |
3745 | 5 | FinishFunction(); |
3746 | 5 | HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy); |
3747 | 5 | CGM.setAtomicSetterHelperFnMap(Ty, HelperFn); |
3748 | 5 | return HelperFn; |
3749 | 7 | } |
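
A hedged illustration of the helper this function builds, written as plain C++. 'Payload' is a hypothetical ivar type with a non-trivial copy assignment; the generated function simply forwards to operator=, exactly as the doc comment above describes.

    struct Payload {
      int *data = nullptr;
      Payload &operator=(const Payload &other) {
        data = other.data;  // non-trivial user-provided assignment
        return *this;
      }
    };

    static void __assign_helper_atomic_property_(Payload *dest,
                                                 const Payload *source) {
      *dest = *source;  // the helper body: a single copy assignment
    }
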
3750 | | |
3751 | | llvm::Constant * |
3752 | | CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction( |
3753 | 453 | const ObjCPropertyImplDecl *PID) { |
3754 | 453 | if (!getLangOpts().CPlusPlus || |
3755 | 453 | !getLangOpts().ObjCRuntime.hasAtomicCopyHelper())
3756 | 421 | return nullptr; |
3757 | 32 | const ObjCPropertyDecl *PD = PID->getPropertyDecl(); |
3758 | 32 | QualType Ty = PD->getType(); |
3759 | 32 | if (!Ty->isRecordType()) |
3760 | 13 | return nullptr; |
3761 | 19 | if ((!(PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_atomic))) |
3762 | 5 | return nullptr; |
3763 | 14 | llvm::Constant *HelperFn = nullptr; |
3764 | 14 | if (hasTrivialGetExpr(PID)) |
3765 | 8 | return nullptr; |
3766 | 6 | assert(PID->getGetterCXXConstructor() && "getGetterCXXConstructor - null"); |
3767 | 6 | if ((HelperFn = CGM.getAtomicGetterHelperFnMap(Ty))) |
3768 | 2 | return HelperFn; |
3769 | | |
3770 | 4 | ASTContext &C = getContext(); |
3771 | 4 | IdentifierInfo *II = |
3772 | 4 | &CGM.getContext().Idents.get("__copy_helper_atomic_property_"); |
3773 | | |
3774 | 4 | QualType ReturnTy = C.VoidTy; |
3775 | 4 | QualType DestTy = C.getPointerType(Ty); |
3776 | 4 | QualType SrcTy = Ty; |
3777 | 4 | SrcTy.addConst(); |
3778 | 4 | SrcTy = C.getPointerType(SrcTy); |
3779 | | |
3780 | 4 | SmallVector<QualType, 2> ArgTys; |
3781 | 4 | ArgTys.push_back(DestTy); |
3782 | 4 | ArgTys.push_back(SrcTy); |
3783 | 4 | QualType FunctionTy = C.getFunctionType(ReturnTy, ArgTys, {}); |
3784 | | |
3785 | 4 | FunctionDecl *FD = FunctionDecl::Create( |
3786 | 4 | C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II, |
3787 | 4 | FunctionTy, nullptr, SC_Static, false, false, false); |
3788 | | |
3789 | 4 | FunctionArgList args; |
3790 | 4 | ParmVarDecl *Params[2]; |
3791 | 4 | ParmVarDecl *DstDecl = ParmVarDecl::Create( |
3792 | 4 | C, FD, SourceLocation(), SourceLocation(), nullptr, DestTy, |
3793 | 4 | C.getTrivialTypeSourceInfo(DestTy, SourceLocation()), SC_None, |
3794 | 4 | /*DefArg=*/nullptr); |
3795 | 4 | args.push_back(Params[0] = DstDecl); |
3796 | 4 | ParmVarDecl *SrcDecl = ParmVarDecl::Create( |
3797 | 4 | C, FD, SourceLocation(), SourceLocation(), nullptr, SrcTy, |
3798 | 4 | C.getTrivialTypeSourceInfo(SrcTy, SourceLocation()), SC_None, |
3799 | 4 | /*DefArg=*/nullptr); |
3800 | 4 | args.push_back(Params[1] = SrcDecl); |
3801 | 4 | FD->setParams(Params); |
3802 | | |
3803 | 4 | const CGFunctionInfo &FI = |
3804 | 4 | CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args); |
3805 | | |
3806 | 4 | llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI); |
3807 | | |
3808 | 4 | llvm::Function *Fn = llvm::Function::Create( |
3809 | 4 | LTy, llvm::GlobalValue::InternalLinkage, "__copy_helper_atomic_property_", |
3810 | 4 | &CGM.getModule()); |
3811 | | |
3812 | 4 | CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI); |
3813 | | |
3814 | 4 | StartFunction(FD, ReturnTy, Fn, FI, args); |
3815 | | |
3816 | 4 | DeclRefExpr SrcExpr(getContext(), SrcDecl, false, SrcTy, VK_PRValue, |
3817 | 4 | SourceLocation()); |
3818 | | |
3819 | 4 | UnaryOperator *SRC = UnaryOperator::Create( |
3820 | 4 | C, &SrcExpr, UO_Deref, SrcTy->getPointeeType(), VK_LValue, OK_Ordinary, |
3821 | 4 | SourceLocation(), false, FPOptionsOverride()); |
3822 | | |
3823 | 4 | CXXConstructExpr *CXXConstExpr = |
3824 | 4 | cast<CXXConstructExpr>(PID->getGetterCXXConstructor()); |
3825 | | |
3826 | 4 | SmallVector<Expr*, 4> ConstructorArgs; |
3827 | 4 | ConstructorArgs.push_back(SRC); |
3828 | 4 | ConstructorArgs.append(std::next(CXXConstExpr->arg_begin()), |
3829 | 4 | CXXConstExpr->arg_end()); |
3830 | | |
3831 | 4 | CXXConstructExpr *TheCXXConstructExpr = |
3832 | 4 | CXXConstructExpr::Create(C, Ty, SourceLocation(), |
3833 | 4 | CXXConstExpr->getConstructor(), |
3834 | 4 | CXXConstExpr->isElidable(), |
3835 | 4 | ConstructorArgs, |
3836 | 4 | CXXConstExpr->hadMultipleCandidates(), |
3837 | 4 | CXXConstExpr->isListInitialization(), |
3838 | 4 | CXXConstExpr->isStdInitListInitialization(), |
3839 | 4 | CXXConstExpr->requiresZeroInitialization(), |
3840 | 4 | CXXConstExpr->getConstructionKind(), |
3841 | 4 | SourceRange()); |
3842 | | |
3843 | 4 | DeclRefExpr DstExpr(getContext(), DstDecl, false, DestTy, VK_PRValue, |
3844 | 4 | SourceLocation()); |
3845 | | |
3846 | 4 | RValue DV = EmitAnyExpr(&DstExpr); |
3847 | 4 | CharUnits Alignment = |
3848 | 4 | getContext().getTypeAlignInChars(TheCXXConstructExpr->getType()); |
3849 | 4 | EmitAggExpr(TheCXXConstructExpr, |
3850 | 4 | AggValueSlot::forAddr( |
3851 | 4 | Address(DV.getScalarVal(), ConvertTypeForMem(Ty), Alignment), |
3852 | 4 | Qualifiers(), AggValueSlot::IsDestructed, |
3853 | 4 | AggValueSlot::DoesNotNeedGCBarriers, |
3854 | 4 | AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap)); |
3855 | | |
3856 | 4 | FinishFunction(); |
3857 | 4 | HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy); |
3858 | 4 | CGM.setAtomicGetterHelperFnMap(Ty, HelperFn); |
3859 | 4 | return HelperFn; |
3860 | 6 | } |
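
A hedged counterpart for the atomic getter, reusing the hypothetical 'Payload' from the setter sketch above: the generated helper copy-constructs the property value into caller-provided storage rather than assigning over it.

    #include <new>

    static void __copy_helper_atomic_property_(Payload *dest,
                                               const Payload *source) {
      ::new (static_cast<void *>(dest)) Payload(*source);  // placement copy-construct
    }
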
3861 | | |
3862 | | llvm::Value * |
3863 | 5 | CodeGenFunction::EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty) { |
3864 | | // Get selectors for retain/autorelease. |
3865 | 5 | IdentifierInfo *CopyID = &getContext().Idents.get("copy"); |
3866 | 5 | Selector CopySelector = |
3867 | 5 | getContext().Selectors.getNullarySelector(CopyID); |
3868 | 5 | IdentifierInfo *AutoreleaseID = &getContext().Idents.get("autorelease"); |
3869 | 5 | Selector AutoreleaseSelector = |
3870 | 5 | getContext().Selectors.getNullarySelector(AutoreleaseID); |
3871 | | |
3872 | | // Emit calls to retain/autorelease. |
3873 | 5 | CGObjCRuntime &Runtime = CGM.getObjCRuntime(); |
3874 | 5 | llvm::Value *Val = Block; |
3875 | 5 | RValue Result; |
3876 | 5 | Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(), |
3877 | 5 | Ty, CopySelector, |
3878 | 5 | Val, CallArgList(), nullptr, nullptr); |
3879 | 5 | Val = Result.getScalarVal(); |
3880 | 5 | Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(), |
3881 | 5 | Ty, AutoreleaseSelector, |
3882 | 5 | Val, CallArgList(), nullptr, nullptr); |
3883 | 5 | Val = Result.getScalarVal(); |
3884 | 5 | return Val; |
3885 | 5 | } |
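
A hedged sketch of what this helper emits: the two nullary message sends correspond to the Objective-C expression [[block copy] autorelease]. The cast-before-call pattern is the documented way to invoke objc_msgSend from C/C++; the simplified SEL typing below is an assumption of the sketch.

    extern "C" void *objc_msgSend(void *self, void *_cmd, ...);
    extern "C" void *sel_registerName(const char *name);

    void *blockCopyAndAutorelease(void *block) {
      using Send = void *(*)(void *, void *);
      void *copied = ((Send)objc_msgSend)(block, sel_registerName("copy"));
      return ((Send)objc_msgSend)(copied, sel_registerName("autorelease"));
    }
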
3886 | | |
3887 | 18 | static unsigned getBaseMachOPlatformID(const llvm::Triple &TT) { |
3888 | 18 | switch (TT.getOS()) { |
3889 | 0 | case llvm::Triple::Darwin: |
3890 | 8 | case llvm::Triple::MacOSX: |
3891 | 8 | return llvm::MachO::PLATFORM_MACOS; |
3892 | 5 | case llvm::Triple::IOS: |
3893 | 5 | return llvm::MachO::PLATFORM_IOS; |
3894 | 2 | case llvm::Triple::TvOS: |
3895 | 2 | return llvm::MachO::PLATFORM_TVOS; |
3896 | 2 | case llvm::Triple::WatchOS: |
3897 | 2 | return llvm::MachO::PLATFORM_WATCHOS; |
3898 | 1 | case llvm::Triple::DriverKit: |
3899 | 1 | return llvm::MachO::PLATFORM_DRIVERKIT; |
3900 | 0 | default: |
3901 | 0 | return /*Unknown platform*/ 0; |
3902 | 18 | } |
3903 | 18 | } |
3904 | | |
3905 | | static llvm::Value *emitIsPlatformVersionAtLeast(CodeGenFunction &CGF, |
3906 | 18 | const VersionTuple &Version) { |
3907 | 18 | CodeGenModule &CGM = CGF.CGM; |
3908 | | // Note: we intend to support multi-platform version checks, so reserve |
3931 | 3909 | | // Note: we intend to support multi-platform version checks, so reserve
3932 | 3910 | | // room for a dual-platform checking invocation that will be
3933 | 3911 | | // implemented in the future.
3912 | | |
3913 | 18 | auto EmitArgs = [&](const VersionTuple &Version, const llvm::Triple &TT) { |
3914 | 18 | Optional<unsigned> Min = Version.getMinor(), SMin = Version.getSubminor(); |
3915 | 18 | Args.push_back( |
3916 | 18 | llvm::ConstantInt::get(CGM.Int32Ty, getBaseMachOPlatformID(TT))); |
3917 | 18 | Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, Version.getMajor())); |
3918 | 18 | Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, Min.value_or(0))); |
3919 | 18 | Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, SMin.value_or(0))); |
3920 | 18 | }; |
3921 | | |
3922 | 18 | assert(!Version.empty() && "unexpected empty version"); |
3923 | 0 | EmitArgs(Version, CGM.getTarget().getTriple()); |
3924 | | |
3925 | 18 | if (!CGM.IsPlatformVersionAtLeastFn) { |
3926 | 14 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
3927 | 14 | CGM.Int32Ty, {CGM.Int32Ty, CGM.Int32Ty, CGM.Int32Ty, CGM.Int32Ty}, |
3928 | 14 | false); |
3929 | 14 | CGM.IsPlatformVersionAtLeastFn = |
3930 | 14 | CGM.CreateRuntimeFunction(FTy, "__isPlatformVersionAtLeast"); |
3931 | 14 | } |
3932 | | |
3933 | 18 | llvm::Value *Check = |
3934 | 18 | CGF.EmitNounwindRuntimeCall(CGM.IsPlatformVersionAtLeastFn, Args); |
3935 | 18 | return CGF.Builder.CreateICmpNE(Check, |
3936 | 18 | llvm::Constant::getNullValue(CGM.Int32Ty)); |
3937 | 18 | } |
3938 | | |
3939 | | llvm::Value * |
3940 | 18 | CodeGenFunction::EmitBuiltinAvailable(const VersionTuple &Version) { |
3941 | | // Darwin uses the new __isPlatformVersionAtLeast family of routines. |
3942 | 18 | if (CGM.getTarget().getTriple().isOSDarwin()) |
3943 | 18 | return emitIsPlatformVersionAtLeast(*this, Version); |
3944 | | |
3945 | 0 | if (!CGM.IsOSVersionAtLeastFn) { |
3946 | 0 | llvm::FunctionType *FTy = |
3947 | 0 | llvm::FunctionType::get(Int32Ty, {Int32Ty, Int32Ty, Int32Ty}, false); |
3948 | 0 | CGM.IsOSVersionAtLeastFn = |
3949 | 0 | CGM.CreateRuntimeFunction(FTy, "__isOSVersionAtLeast"); |
3950 | 0 | } |
3951 | |
3952 | 0 | Optional<unsigned> Min = Version.getMinor(), SMin = Version.getSubminor(); |
3953 | 0 | llvm::Value *Args[] = { |
3954 | 0 | llvm::ConstantInt::get(CGM.Int32Ty, Version.getMajor()), |
3955 | 0 | llvm::ConstantInt::get(CGM.Int32Ty, Min.value_or(0)), |
3956 | 0 | llvm::ConstantInt::get(CGM.Int32Ty, SMin.value_or(0))}; |
3957 | |
3958 | 0 | llvm::Value *CallRes = |
3959 | 0 | EmitNounwindRuntimeCall(CGM.IsOSVersionAtLeastFn, Args); |
3960 | |
3961 | 0 | return Builder.CreateICmpNE(CallRes, llvm::Constant::getNullValue(Int32Ty)); |
3962 | 18 | } |
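
A hedged sketch of what an Objective-C '@available(macOS 10.15, *)' condition lowers to. Both prototypes are real compiler-rt entry points; the PLATFORM_MACOS value of 1 is taken from llvm/BinaryFormat/MachO.h, and the wrapper is illustrative.

    extern "C" int __isPlatformVersionAtLeast(unsigned Platform, unsigned Major,
                                              unsigned Minor, unsigned Subminor);
    extern "C" int __isOSVersionAtLeast(int Major, int Minor, int Subminor);

    bool availableMacOS_10_15(bool isDarwin) {
      if (isDarwin)  // Darwin uses the platform-aware routine
        return __isPlatformVersionAtLeast(/*PLATFORM_MACOS*/ 1, 10, 15, 0) != 0;
      return __isOSVersionAtLeast(10, 15, 0) != 0;  // legacy fallback
    }
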
3963 | | |
3964 | | static bool isFoundationNeededForDarwinAvailabilityCheck( |
3965 | 14 | const llvm::Triple &TT, const VersionTuple &TargetVersion) { |
3966 | 14 | VersionTuple FoundationDroppedInVersion; |
3967 | 14 | switch (TT.getOS()) { |
3968 | 3 | case llvm::Triple::IOS: |
3969 | 5 | case llvm::Triple::TvOS: |
3970 | 5 | FoundationDroppedInVersion = VersionTuple(/*Major=*/13); |
3971 | 5 | break; |
3972 | 2 | case llvm::Triple::WatchOS: |
3973 | 2 | FoundationDroppedInVersion = VersionTuple(/*Major=*/6); |
3974 | 2 | break; |
3975 | 0 | case llvm::Triple::Darwin: |
3976 | 6 | case llvm::Triple::MacOSX: |
3977 | 6 | FoundationDroppedInVersion = VersionTuple(/*Major=*/10, /*Minor=*/15); |
3978 | 6 | break; |
3979 | 1 | case llvm::Triple::DriverKit: |
3980 | | // DriverKit doesn't need Foundation. |
3981 | 1 | return false; |
3982 | 0 | default: |
3983 | 0 | llvm_unreachable("Unexpected OS"); |
3984 | 14 | } |
3985 | 13 | return TargetVersion < FoundationDroppedInVersion; |
3986 | 14 | } |
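
A worked example of the threshold logic above: a deployment target of macOS 10.14 makes this return true (Foundation is still needed for the availability check), while a target of 10.15 or later returns false.
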
3987 | | |
3988 | 35.9k | void CodeGenModule::emitAtAvailableLinkGuard() { |
3989 | 35.9k | if (!IsPlatformVersionAtLeastFn) |
3990 | 35.8k | return; |
3991 | | // @available requires CoreFoundation only on Darwin. |
3992 | 14 | if (!Target.getTriple().isOSDarwin()) |
3993 | 0 | return; |
3994 | | // @available doesn't need Foundation on macOS 10.15+, iOS/tvOS 13+, or |
3995 | | // watchOS 6+. |
3996 | 14 | if (!isFoundationNeededForDarwinAvailabilityCheck( |
3997 | 14 | Target.getTriple(), Target.getPlatformMinVersion())) |
3998 | 6 | return; |
3999 | | // Add -framework CoreFoundation to the linker commands. We still want to |
4000 | | // emit the core foundation reference down below because otherwise if |
4001 | | // CoreFoundation is not used in the code, the linker won't link the |
4002 | | // framework. |
4003 | 8 | auto &Context = getLLVMContext(); |
4004 | 8 | llvm::Metadata *Args[2] = {llvm::MDString::get(Context, "-framework"), |
4005 | 8 | llvm::MDString::get(Context, "CoreFoundation")}; |
4006 | 8 | LinkerOptionsMetadata.push_back(llvm::MDNode::get(Context, Args)); |
4007 | | // Emit a reference to a symbol from CoreFoundation to ensure that |
4008 | | // CoreFoundation is linked into the final binary. |
4009 | 8 | llvm::FunctionType *FTy = |
4010 | 8 | llvm::FunctionType::get(Int32Ty, {VoidPtrTy}, false); |
4011 | 8 | llvm::FunctionCallee CFFunc = |
4012 | 8 | CreateRuntimeFunction(FTy, "CFBundleGetVersionNumber"); |
4013 | | |
4014 | 8 | llvm::FunctionType *CheckFTy = llvm::FunctionType::get(VoidTy, {}, false); |
4015 | 8 | llvm::FunctionCallee CFLinkCheckFuncRef = CreateRuntimeFunction( |
4016 | 8 | CheckFTy, "__clang_at_available_requires_core_foundation_framework", |
4017 | 8 | llvm::AttributeList(), /*Local=*/true); |
4018 | 8 | llvm::Function *CFLinkCheckFunc = |
4019 | 8 | cast<llvm::Function>(CFLinkCheckFuncRef.getCallee()->stripPointerCasts()); |
4020 | 8 | if (CFLinkCheckFunc->empty()) { |
4021 | 8 | CFLinkCheckFunc->setLinkage(llvm::GlobalValue::LinkOnceAnyLinkage); |
4022 | 8 | CFLinkCheckFunc->setVisibility(llvm::GlobalValue::HiddenVisibility); |
4023 | 8 | CodeGenFunction CGF(*this); |
4024 | 8 | CGF.Builder.SetInsertPoint(CGF.createBasicBlock("", CFLinkCheckFunc)); |
4025 | 8 | CGF.EmitNounwindRuntimeCall(CFFunc, |
4026 | 8 | llvm::Constant::getNullValue(VoidPtrTy)); |
4027 | 8 | CGF.Builder.CreateUnreachable(); |
4028 | 8 | addCompilerUsedGlobal(CFLinkCheckFunc); |
4029 | 8 | } |
4030 | 8 | } |
4031 | | |
4032 | 16.4k | CGObjCRuntime::~CGObjCRuntime() {} |