Coverage Report

Created: 2019-07-24 05:18

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp

//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                        Aggregate Expression Emitter
//===----------------------------------------------------------------------===//

namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  AggValueSlot Dest;
  bool IsResultUnused;

  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored()) return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }
  void EnsureDest(QualType T) {
    if (!Dest.isIgnored()) return;
    Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }

  // Calls `Fn` with a valid return value slot, potentially creating a temporary
  // to do so. If a temporary is created, an appropriate copy into `Dest` will
  // be emitted, as will lifetime markers.
  //
  // The given function should take a ReturnValueSlot, and return an RValue that
  // points to said slot.
  void withReturnValueSlot(const Expr *E,
                           llvm::function_ref<RValue(ReturnValueSlot)> Fn);

public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
    : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
    IsResultUnused(IsResultUnused) { }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents a value lvalue, this method emits the address of the lvalue,
  /// then loads the result into DestPtr.
  void EmitAggLoadOfLValue(const Expr *E);

  enum ExprValueKind {
    EVK_RValue,
    EVK_NonRValue
  };

  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
  /// SrcIsRValue is true if source comes from an RValue.
  void EmitFinalDestCopy(QualType type, const LValue &src,
                         ExprValueKind SrcValueKind = EVK_NonRValue);
  void EmitFinalDestCopy(QualType type, RValue src);
  void EmitCopy(QualType type, const AggValueSlot &dest,
                const AggValueSlot &src);

  void EmitMoveFromReturnSlot(const Expr *E, RValue Src);

  void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
                     QualType ArrayQTy, InitListExpr *E);

  AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
    if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
      return AggValueSlot::NeedsGCBarriers;
    return AggValueSlot::DoesNotNeedGCBarriers;
  }

  bool TypeRequiresGCollection(QualType T);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  void Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    StmtVisitor<AggExprEmitter>::Visit(E);
  }

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    Visit(GE->getResultExpr());
  }
  void VisitCoawaitExpr(CoawaitExpr *E) {
    CGF.EmitCoawaitExpr(*E, Dest, IsResultUnused);
  }
  void VisitCoyieldExpr(CoyieldExpr *E) {
    CGF.EmitCoyieldExpr(*E, Dest, IsResultUnused);
  }
  void VisitUnaryCoawait(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }

  void VisitConstantExpr(ConstantExpr *E) {
    return Visit(E->getSubExpr());
  }

  // l-values.
  void VisitDeclRefExpr(DeclRefExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  // Operators.
  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);
  void VisitBinCmp(const BinaryOperator *E);

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
                              llvm::Value *outerBegin = nullptr);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    Visit(DAE->getExpr());
  }
  void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    Visit(DIE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
  void VisitLambdaExpr(LambdaExpr *E);
  void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
  void VisitOpaqueValueExpr(OpaqueValueExpr *E);

  void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    if (E->isGLValue()) {
      LValue LV = CGF.EmitPseudoObjectLValue(E);
      return EmitFinalDestCopy(E->getType(), LV);
    }

    CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
  }

  void VisitVAArgExpr(VAArgExpr *E);

  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address);
  //  case Expr::ChooseExprClass:
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
  void VisitAtomicExpr(AtomicExpr *E) {
    RValue Res = CGF.EmitAtomicExpr(E);
    EmitFinalDestCopy(E->getType(), Res);
  }
};
}  // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents a value lvalue, this method emits the address of the lvalue,
/// then loads the result into DestPtr.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
  LValue LV = CGF.EmitLValue(E);

  // If the type of the l-value is atomic, then do an atomic load.
  if (LV.getType()->isAtomicType() ||
      CGF.LValueIsSuitableForInlineAtomic(LV)) {
    CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
    return;
  }

  EmitFinalDestCopy(E->getType(), LV);
}

/// True if the given aggregate type requires special GC API calls.
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
  // Only record types have members that might require garbage collection.
  const RecordType *RecordTy = T->getAs<RecordType>();
  if (!RecordTy) return false;

  // Don't mess with non-trivial C++ types.
  RecordDecl *Record = RecordTy->getDecl();
  if (isa<CXXRecordDecl>(Record) &&
      (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    return false;

  // Check whether the type has an object member.
  return Record->hasObjectMember();
}
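
// Illustrative case for the temporary path taken by withReturnValueSlot
// below (names are hypothetical):
//
//   struct S { int a[8]; };
//   S returnsS();
//   void f(S &s) { s = returnsS(); }
//
// Here the destination slot for the call is the assignment's LHS, which is
// marked potentially aliased, so the result is materialized into a fresh
// temporary (bracketed by llvm.lifetime.start/end markers) and then copied
// into 's' by EmitFinalDestCopy.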

void AggExprEmitter::withReturnValueSlot(
    const Expr *E, llvm::function_ref<RValue(ReturnValueSlot)> EmitCall) {
  QualType RetTy = E->getType();
  bool RequiresDestruction =
      Dest.isIgnored() &&
      RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct;

  // If it makes no observable difference, save a memcpy + temporary.
  //
  // We need to always provide our own temporary if destruction is required.
  // Otherwise, EmitCall will emit its own, notice that it's "unused", and end
  // its lifetime before we have the chance to emit a proper destructor call.
  bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() ||
                 (RequiresDestruction && !Dest.getAddress().isValid());

  Address RetAddr = Address::invalid();
  Address RetAllocaAddr = Address::invalid();

  EHScopeStack::stable_iterator LifetimeEndBlock;
  llvm::Value *LifetimeSizePtr = nullptr;
  llvm::IntrinsicInst *LifetimeStartInst = nullptr;
  if (!UseTemp) {
    RetAddr = Dest.getAddress();
  } else {
    RetAddr = CGF.CreateMemTemp(RetTy, "tmp", &RetAllocaAddr);
    uint64_t Size =
        CGF.CGM.getDataLayout().getTypeAllocSize(CGF.ConvertTypeForMem(RetTy));
    LifetimeSizePtr = CGF.EmitLifetimeStart(Size, RetAllocaAddr.getPointer());
    if (LifetimeSizePtr) {
      LifetimeStartInst =
          cast<llvm::IntrinsicInst>(std::prev(Builder.GetInsertPoint()));
      assert(LifetimeStartInst->getIntrinsicID() ==
                 llvm::Intrinsic::lifetime_start &&
             "Last insertion wasn't a lifetime.start?");

      CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>(
          NormalEHLifetimeMarker, RetAllocaAddr, LifetimeSizePtr);
      LifetimeEndBlock = CGF.EHStack.stable_begin();
    }
  }

  RValue Src =
      EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused));

  if (RequiresDestruction)
    CGF.pushDestroy(RetTy.isDestructedType(), Src.getAggregateAddress(), RetTy);

  if (!UseTemp)
    return;

  assert(Dest.getPointer() != Src.getAggregatePointer());
  EmitFinalDestCopy(E->getType(), Src);

  if (!RequiresDestruction && LifetimeStartInst) {
    // If there's no dtor to run, the copy was the last use of our temporary.
    // Since we're not guaranteed to be in an ExprWithCleanups, clean up
    // eagerly.
    CGF.DeactivateCleanupBlock(LifetimeEndBlock, LifetimeStartInst);
    CGF.EmitLifetimeEnd(LifetimeSizePtr, RetAllocaAddr.getPointer());
  }
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
  assert(src.isAggregate() && "value must be aggregate value!");
  LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
  EmitFinalDestCopy(type, srcLV, EVK_RValue);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src,
                                       ExprValueKind SrcValueKind) {
  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context that doesn't care about the result.  Note that loads
  // from volatile l-values force the existence of a non-ignored
  // destination.
  if (Dest.isIgnored())
    return;

  // Copy non-trivial C structs here.
  LValue DstLV = CGF.MakeAddrLValue(
      Dest.getAddress(), Dest.isVolatile() ? type.withVolatile() : type);

  if (SrcValueKind == EVK_RValue) {
    if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) {
      if (Dest.isPotentiallyAliased())
        CGF.callCStructMoveAssignmentOperator(DstLV, src);
      else
        CGF.callCStructMoveConstructor(DstLV, src);
      return;
    }
  } else {
    if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
      if (Dest.isPotentiallyAliased())
        CGF.callCStructCopyAssignmentOperator(DstLV, src);
      else
        CGF.callCStructCopyConstructor(DstLV, src);
      return;
    }
  }

  AggValueSlot srcAgg =
    AggValueSlot::forLValue(src, AggValueSlot::IsDestructed,
                            needsGC(type), AggValueSlot::IsAliased,
                            AggValueSlot::MayOverlap);
  EmitCopy(type, Dest, srcAgg);
}

/// Perform a copy from the source into the destination.
///
/// \param type - the type of the aggregate being copied; qualifiers are
///   ignored
void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
                              const AggValueSlot &src) {
  if (dest.requiresGCollection()) {
    CharUnits sz = dest.getPreferredSize(CGF.getContext(), type);
    llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                                      dest.getAddress(),
                                                      src.getAddress(),
                                                      size);
    return;
  }

  // If the result of the assignment is used, copy the LHS there also.
  // It's volatile if either side is.  Use the minimum alignment of
  // the two sides.
  LValue DestLV = CGF.MakeAddrLValue(dest.getAddress(), type);
  LValue SrcLV = CGF.MakeAddrLValue(src.getAddress(), type);
  CGF.EmitAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(),
                        dest.isVolatile() || src.isVolatile());
}
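
// Illustrative lowering handled by VisitCXXStdInitializerListExpr below,
// assuming the usual library layout of std::initializer_list:
//
//   std::initializer_list<int> il = {1, 2, 3};
//
// emits a backing array of three ints and then fills in the list object,
// whose fields must form either a (begin pointer, end pointer) or a
// (begin pointer, length) pair; any other layout is rejected as a
// "weird std::initializer_list".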

/// Emit the initializer for a std::initializer_list initialized with a
/// real initializer list.
void
AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
  // Emit an array containing the elements.  The array is externally destructed
  // if the std::initializer_list object is.
  ASTContext &Ctx = CGF.getContext();
  LValue Array = CGF.EmitLValue(E->getSubExpr());
  assert(Array.isSimple() && "initializer_list array not a simple lvalue");
  Address ArrayPtr = Array.getAddress();

  const ConstantArrayType *ArrayType =
      Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
  assert(ArrayType && "std::initializer_list constructed from non-array");

  // FIXME: Perform the checks on the field types in SemaInit.
  RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
  RecordDecl::field_iterator Field = Record->field_begin();
  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  // Start pointer.
  if (!Field->getType()->isPointerType() ||
      !Ctx.hasSameType(Field->getType()->getPointeeType(),
                       ArrayType->getElementType())) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  AggValueSlot Dest = EnsureSlot(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
  LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
  llvm::Value *IdxStart[] = { Zero, Zero };
  llvm::Value *ArrayStart =
      Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxStart, "arraystart");
  CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
  ++Field;

  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  llvm::Value *Size = Builder.getInt(ArrayType->getSize());
  LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  if (Field->getType()->isPointerType() &&
      Ctx.hasSameType(Field->getType()->getPointeeType(),
                      ArrayType->getElementType())) {
    // End pointer.
    llvm::Value *IdxEnd[] = { Zero, Size };
    llvm::Value *ArrayEnd =
        Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxEnd, "arrayend");
    CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
  } else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
    // Length.
    CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
  } else {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }
}
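
// For example (illustrative): in 'int a[10] = {1, 2};' the implicit filler
// for the remaining eight elements is equivalent to zero-initialization, so
// isTrivialFiller returns true and EmitArrayInit can skip the filler loop
// when the destination memory is already zeroed.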

/// Determine if E is a trivial array filler, that is, one that is
/// equivalent to zero-initialization.
static bool isTrivialFiller(Expr *E) {
  if (!E)
    return true;

  if (isa<ImplicitValueInitExpr>(E))
    return true;

  if (auto *ILE = dyn_cast<InitListExpr>(E)) {
    if (ILE->getNumInits())
      return false;
    return isTrivialFiller(ILE->getArrayFiller());
  }

  if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
    return Cons->getConstructor()->isDefaultConstructor() &&
           Cons->getConstructor()->isTrivial();

  // FIXME: Are there other cases where we can avoid emitting an initializer?
  return false;
}
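
// Illustrative case for the copy-from-global fast path in EmitArrayInit
// below:
//
//   int a[8] = {1, 2, 3, 4, 5, 6, 7, 8};
//
// The explicit initializers cover 32 bytes (more than the 16-byte threshold)
// of a trivially copyable element type, so the initializer is emitted once
// as a private constant global ("constinit") and the array is initialized
// with a single copy from it instead of eight separate element stores.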

/// Emit initialization of an array from an initializer list.
void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
                                   QualType ArrayQTy, InitListExpr *E) {
  uint64_t NumInitElements = E->getNumInits();

  uint64_t NumArrayElements = AType->getNumElements();
  assert(NumInitElements <= NumArrayElements);

  QualType elementType =
      CGF.getContext().getAsArrayType(ArrayQTy)->getElementType();

  // DestPtr is an array*.  Construct an elementType* by drilling
  // down a level.
  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
  llvm::Value *indices[] = { zero, zero };
  llvm::Value *begin =
    Builder.CreateInBoundsGEP(DestPtr.getPointer(), indices, "arrayinit.begin");

  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
  CharUnits elementAlign =
    DestPtr.getAlignment().alignmentOfArrayElement(elementSize);

  // Consider initializing the array by copying from a global. For this to be
  // more efficient than per-element initialization, the size of the elements
  // with explicit initializers should be large enough.
  if (NumInitElements * elementSize.getQuantity() > 16 &&
      elementType.isTriviallyCopyableType(CGF.getContext())) {
    CodeGen::CodeGenModule &CGM = CGF.CGM;
    ConstantEmitter Emitter(CGM);
    LangAS AS = ArrayQTy.getAddressSpace();
    if (llvm::Constant *C = Emitter.tryEmitForInitializer(E, AS, ArrayQTy)) {
      auto GV = new llvm::GlobalVariable(
          CGM.getModule(), C->getType(),
          CGM.isTypeConstant(ArrayQTy, /* ExcludeCtorDtor= */ true),
          llvm::GlobalValue::PrivateLinkage, C, "constinit",
          /* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal,
          CGM.getContext().getTargetAddressSpace(AS));
      Emitter.finalize(GV);
      CharUnits Align = CGM.getContext().getTypeAlignInChars(ArrayQTy);
      GV->setAlignment(Align.getQuantity());
      EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GV, ArrayQTy, Align));
      return;
    }
  }

  // Exception safety requires us to destroy all the
  // already-constructed members if an initializer throws.
  // For that, we'll need an EH cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  Address endOfInit = Address::invalid();
  EHScopeStack::stable_iterator cleanup;
  llvm::Instruction *cleanupDominator = nullptr;
  if (CGF.needsEHCleanup(dtorKind)) {
    // In principle we could tell the cleanup where we are more
    // directly, but the control flow can get so varied here that it
    // would actually be quite complex.  Therefore we go through an
    // alloca.
    endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
                                     "arrayinit.endOfInit");
    cleanupDominator = Builder.CreateStore(begin, endOfInit);
    CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                         elementAlign,
                                         CGF.getDestroyer(dtorKind));
    cleanup = CGF.EHStack.stable_begin();

  // Otherwise, remember that we didn't need a cleanup.
  } else {
    dtorKind = QualType::DK_none;
  }

  llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

  // The 'current element to initialize'.  The invariants on this
  // variable are complicated.  Essentially, after each iteration of
  // the loop, it points to the last initialized element, except
  // that it points to the beginning of the array before any
  // elements have been initialized.
  llvm::Value *element = begin;

  // Emit the explicit initializers.
  for (uint64_t i = 0; i != NumInitElements; ++i) {
    // Advance to the next element.
    if (i > 0) {
      element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");

      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    LValue elementLV =
      CGF.MakeAddrLValue(Address(element, elementAlign), elementType);
    EmitInitializationToLValue(E->getInit(i), elementLV);
  }

  // Check whether there's a non-trivial array-fill expression.
  Expr *filler = E->getArrayFiller();
  bool hasTrivialFiller = isTrivialFiller(filler);

  // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression.  We can skip this if we're
  // emitting to zeroed memory.
  if (NumInitElements != NumArrayElements &&
      !(Dest.isZeroed() && hasTrivialFiller &&
        CGF.getTypes().isZeroInitializable(elementType))) {

    // Use an actual loop.  This is basically
    //   do { *array++ = filler; } while (array != end);

    // Advance to the start of the rest of the array.
    if (NumInitElements) {
      element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    // Compute the end of the array.
    llvm::Value *end = Builder.CreateInBoundsGEP(begin,
                      llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
                                                 "arrayinit.end");

    llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
    llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

    // Jump into the body.
    CGF.EmitBlock(bodyBB);
    llvm::PHINode *currentElement =
      Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
    currentElement->addIncoming(element, entryBB);

    // Emit the actual filler expression.
    {
      // C++1z [class.temporary]p5:
      //   when a default constructor is called to initialize an element of
      //   an array with no corresponding initializer [...] the destruction of
      //   every temporary created in a default argument is sequenced before
      //   the construction of the next array element, if any
      CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
      LValue elementLV =
        CGF.MakeAddrLValue(Address(currentElement, elementAlign), elementType);
      if (filler)
        EmitInitializationToLValue(filler, elementLV);
      else
        EmitNullInitializationToLValue(elementLV);
    }

    // Move on to the next element.
    llvm::Value *nextElement =
      Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");

    // Tell the EH cleanup that we finished with the last element.
    if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);

    // Leave the loop if we're done.
    llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                             "arrayinit.done");
    llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
    Builder.CreateCondBr(done, endBB, bodyBB);
    currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

    CGF.EmitBlock(endBB);
  }

  // Leave the partial-array cleanup if we entered one.
  if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
}

//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
  Visit(E->GetTemporaryExpr());
}

void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  // If this is a unique OVE, just visit its source expression.
  if (e->isUnique())
    Visit(e->getSourceExpr());
  else
    EmitFinalDestCopy(e->getType(), CGF.getOrCreateOpaqueLValueMapping(e));
}

void
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
  if (Dest.isPotentiallyAliased() &&
      E->getType().isPODType(CGF.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    EmitAggLoadOfLValue(E);
    return;
  }

  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitAggExpr(E->getInitializer(), Slot);
}
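
// Illustrative use of findPeephole below: VisitCastExpr uses it to cancel a
// CK_AtomicToNonAtomic cast that sits directly on top of the reverse
// CK_NonAtomicToAtomic cast (or vice versa), looking through parentheses and
// CK_NoOp casts, so the inner operand can be emitted once instead of being
// converted back and forth.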

/// Attempt to look through various unimportant expressions to find a
/// cast of the given kind.
static Expr *findPeephole(Expr *op, CastKind kind) {
  while (true) {
    op = op->IgnoreParens();
    if (CastExpr *castE = dyn_cast<CastExpr>(op)) {
      if (castE->getCastKind() == kind)
        return castE->getSubExpr();
      if (castE->getCastKind() == CK_NoOp)
        continue;
    }
    return nullptr;
  }
}

void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
    CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
  switch (E->getCastKind()) {
  case CK_Dynamic: {
    // FIXME: Can this actually happen? We have no test coverage for it.
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
                                      CodeGenFunction::TCK_Load);
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    // Evaluate even if the destination is ignored.
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    // GCC union extension
    QualType Ty = E->getSubExpr()->getType();
    Address CastPtr =
      Builder.CreateElementBitCast(Dest.getAddress(), CGF.ConvertType(Ty));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty));
    break;
  }

  case CK_LValueToRValueBitCast: {
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    LValue SourceLV = CGF.EmitLValue(E->getSubExpr());
    Address SourceAddress =
        Builder.CreateElementBitCast(SourceLV.getAddress(), CGF.Int8Ty);
    Address DestAddress =
        Builder.CreateElementBitCast(Dest.getAddress(), CGF.Int8Ty);
    llvm::Value *SizeVal = llvm::ConstantInt::get(
        CGF.SizeTy,
        CGF.getContext().getTypeSizeInChars(E->getType()).getQuantity());
    Builder.CreateMemCpy(DestAddress, SourceAddress, SizeVal);
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
                "should have been unpacked before we got here");
  }

  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic: {
    bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);

    // Determine the atomic and value types.
    QualType atomicType = E->getSubExpr()->getType();
    QualType valueType = E->getType();
    if (isToAtomic) std::swap(atomicType, valueType);

    assert(atomicType->isAtomicType());
    assert(CGF.getContext().hasSameUnqualifiedType(valueType,
                          atomicType->castAs<AtomicType>()->getValueType()));

    // Just recurse normally if we're ignoring the result or the
    // atomic type doesn't change representation.
    if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
      return Visit(E->getSubExpr());
    }

    CastKind peepholeTarget =
      (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);

    // These two cases are reverses of each other; try to peephole them.
    if (Expr *op = findPeephole(E->getSubExpr(), peepholeTarget)) {
      assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
                                                     E->getType()) &&
           "peephole significantly changed types?");
      return Visit(op);
    }

    // If we're converting an r-value of non-atomic type to an r-value
    // of atomic type, just emit directly into the relevant sub-object.
    if (isToAtomic) {
      AggValueSlot valueDest = Dest;
      if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
        // Zero-initialize.  (Strictly speaking, we only need to initialize
        // the padding at the end, but this is simpler.)
        if (!Dest.isZeroed())
          CGF.EmitNullInitialization(Dest.getAddress(), atomicType);

        // Build a GEP to refer to the subobject.
        Address valueAddr =
            CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0);
        valueDest = AggValueSlot::forAddr(valueAddr,
                                          valueDest.getQualifiers(),
                                          valueDest.isExternallyDestructed(),
                                          valueDest.requiresGCollection(),
                                          valueDest.isPotentiallyAliased(),
                                          AggValueSlot::DoesNotOverlap,
                                          AggValueSlot::IsZeroed);
      }

      CGF.EmitAggExpr(E->getSubExpr(), valueDest);
      return;
    }

    // Otherwise, we're converting an atomic type to a non-atomic type.
    // Make an atomic temporary, emit into that, and then copy the value out.
    AggValueSlot atomicSlot =
      CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
    CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);

    Address valueAddr = Builder.CreateStructGEP(atomicSlot.getAddress(), 0);
    RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
    return EmitFinalDestCopy(valueType, rvalue);
  }
  case CK_AddressSpaceConversion:
     return Visit(E->getSubExpr());

  case CK_LValueToRValue:
    // If we're loading from a volatile type, force the destination
    // into existence.
    if (E->getSubExpr()->getType().isVolatileQualified()) {
      EnsureDest(E->getType());
      return Visit(E->getSubExpr());
    }

    LLVM_FALLTHROUGH;

  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");

  case CK_Dependent:
  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_BuiltinFnToFnPtr:
  case CK_ZeroToOCLOpaqueType:

  case CK_IntToOCLSampler:
  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToIntegral:
  case CK_IntegralToFixedPoint:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}

void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
  if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
    EmitAggLoadOfLValue(E);
    return;
  }

  withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
    return CGF.EmitCallExpr(E, Slot);
  });
}

void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
    return CGF.EmitObjCMessageExpr(E, Slot);
  });
}

void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}

void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}

enum CompareKind {
  CK_Less,
  CK_Greater,
  CK_Equal,
};
917
918
static llvm::Value *EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF,
919
                                const BinaryOperator *E, llvm::Value *LHS,
920
                                llvm::Value *RHS, CompareKind Kind,
921
30
                                const char *NameSuffix = "") {
922
30
  QualType ArgTy = E->getLHS()->getType();
923
30
  if (const ComplexType *CT = ArgTy->getAs<ComplexType>())
924
4
    ArgTy = CT->getElementType();
925
30
926
30
  if (const auto *MPT = ArgTy->getAs<MemberPointerType>()) {
927
2
    assert(Kind == CK_Equal &&
928
2
           "member pointers may only be compared for equality");
929
2
    return CGF.CGM.getCXXABI().EmitMemberPointerComparison(
930
2
        CGF, LHS, RHS, MPT, /*IsInequality*/ false);
931
2
  }
932
28
933
28
  // Compute the comparison instructions for the specified comparison kind.
934
28
  struct CmpInstInfo {
935
28
    const char *Name;
936
28
    llvm::CmpInst::Predicate FCmp;
937
28
    llvm::CmpInst::Predicate SCmp;
938
28
    llvm::CmpInst::Predicate UCmp;
939
28
  };
940
28
  CmpInstInfo InstInfo = [&]() -> CmpInstInfo {
941
28
    using FI = llvm::FCmpInst;
942
28
    using II = llvm::ICmpInst;
943
28
    switch (Kind) {
944
28
    case CK_Less:
945
11
      return {"cmp.lt", FI::FCMP_OLT, II::ICMP_SLT, II::ICMP_ULT};
946
28
    case CK_Greater:
947
1
      return {"cmp.gt", FI::FCMP_OGT, II::ICMP_SGT, II::ICMP_UGT};
948
28
    case CK_Equal:
949
16
      return {"cmp.eq", FI::FCMP_OEQ, II::ICMP_EQ, II::ICMP_EQ};
950
0
    }
951
0
    llvm_unreachable("Unrecognised CompareKind enum");
952
0
  }();
953
28
954
28
  if (ArgTy->hasFloatingRepresentation())
955
5
    return Builder.CreateFCmp(InstInfo.FCmp, LHS, RHS,
956
5
                              llvm::Twine(InstInfo.Name) + NameSuffix);
957
23
  if (ArgTy->isIntegralOrEnumerationType() || 
ArgTy->isPointerType()3
) {
958
23
    auto Inst =
959
23
        ArgTy->hasSignedIntegerRepresentation() ? 
InstInfo.SCmp12
:
InstInfo.UCmp11
;
960
23
    return Builder.CreateICmp(Inst, LHS, RHS,
961
23
                              llvm::Twine(InstInfo.Name) + NameSuffix);
962
23
  }
963
0
964
0
  llvm_unreachable("unsupported aggregate binary expression should have "
965
0
                   "already been handled");
966
0
}
967
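
// Illustrative lowering performed by VisitBinCmp below: for a strongly
// ordered signed integer comparison,
//
//   auto r = a <=> b;   // std::strong_ordering
//
// the emitted IR is roughly
//
//   %cmp.lt = icmp slt %a, %b
//   %cmp.eq = icmp eq  %a, %b
//   %sel.lt = select %cmp.lt, <less>, <greater>
//   %sel.eq = select %cmp.eq, <equal>, %sel.lt
//
// where <less>/<equal>/<greater> are the constant integer values of the
// comparison category, stored into the category object's single field.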

void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) {
  using llvm::BasicBlock;
  using llvm::PHINode;
  using llvm::Value;
  assert(CGF.getContext().hasSameType(E->getLHS()->getType(),
                                      E->getRHS()->getType()));
  const ComparisonCategoryInfo &CmpInfo =
      CGF.getContext().CompCategories.getInfoForType(E->getType());
  assert(CmpInfo.Record->isTriviallyCopyable() &&
         "cannot copy non-trivially copyable aggregate");

  QualType ArgTy = E->getLHS()->getType();

  // TODO: Handle comparing these types.
  if (ArgTy->isVectorType())
    return CGF.ErrorUnsupported(
        E, "aggregate three-way comparison with vector arguments");
  if (!ArgTy->isIntegralOrEnumerationType() && !ArgTy->isRealFloatingType() &&
      !ArgTy->isNullPtrType() && !ArgTy->isPointerType() &&
      !ArgTy->isMemberPointerType() && !ArgTy->isAnyComplexType()) {
    return CGF.ErrorUnsupported(E, "aggregate three-way comparison");
  }
  bool IsComplex = ArgTy->isAnyComplexType();

  // Evaluate the operands to the expression and extract their values.
  auto EmitOperand = [&](Expr *E) -> std::pair<Value *, Value *> {
    RValue RV = CGF.EmitAnyExpr(E);
    if (RV.isScalar())
      return {RV.getScalarVal(), nullptr};
    if (RV.isAggregate())
      return {RV.getAggregatePointer(), nullptr};
    assert(RV.isComplex());
    return RV.getComplexVal();
  };
  auto LHSValues = EmitOperand(E->getLHS()),
       RHSValues = EmitOperand(E->getRHS());

  auto EmitCmp = [&](CompareKind K) {
    Value *Cmp = EmitCompare(Builder, CGF, E, LHSValues.first, RHSValues.first,
                             K, IsComplex ? ".r" : "");
    if (!IsComplex)
      return Cmp;
    assert(K == CompareKind::CK_Equal);
    Value *CmpImag = EmitCompare(Builder, CGF, E, LHSValues.second,
                                 RHSValues.second, K, ".i");
    return Builder.CreateAnd(Cmp, CmpImag, "and.eq");
  };
  auto EmitCmpRes = [&](const ComparisonCategoryInfo::ValueInfo *VInfo) {
    return Builder.getInt(VInfo->getIntValue());
  };

  Value *Select;
  if (ArgTy->isNullPtrType()) {
    Select = EmitCmpRes(CmpInfo.getEqualOrEquiv());
  } else if (CmpInfo.isEquality()) {
    Select = Builder.CreateSelect(
        EmitCmp(CK_Equal), EmitCmpRes(CmpInfo.getEqualOrEquiv()),
        EmitCmpRes(CmpInfo.getNonequalOrNonequiv()), "sel.eq");
  } else if (!CmpInfo.isPartial()) {
    Value *SelectOne =
        Builder.CreateSelect(EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()),
                             EmitCmpRes(CmpInfo.getGreater()), "sel.lt");
    Select = Builder.CreateSelect(EmitCmp(CK_Equal),
                                  EmitCmpRes(CmpInfo.getEqualOrEquiv()),
                                  SelectOne, "sel.eq");
  } else {
    Value *SelectEq = Builder.CreateSelect(
        EmitCmp(CK_Equal), EmitCmpRes(CmpInfo.getEqualOrEquiv()),
        EmitCmpRes(CmpInfo.getUnordered()), "sel.eq");
    Value *SelectGT = Builder.CreateSelect(EmitCmp(CK_Greater),
                                           EmitCmpRes(CmpInfo.getGreater()),
                                           SelectEq, "sel.gt");
    Select = Builder.CreateSelect(
        EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()), SelectGT, "sel.lt");
  }
  // Create the return value in the destination slot.
  EnsureDest(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());

  // Emit the address of the first (and only) field in the comparison category
  // type, and initialize it from the constant integer value selected above.
  LValue FieldLV = CGF.EmitLValueForFieldInitialization(
      DestLV, *CmpInfo.Record->field_begin());
  CGF.EmitStoreThroughLValue(RValue::get(Select), FieldLV, /*IsInit*/ true);

  // All done! The result is in the Dest slot.
}

void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
    VisitPointerToDataMemberBinaryOperator(E);
  else
    CGF.ErrorUnsupported(E, "aggregate binary expression");
}

void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
                                                    const BinaryOperator *E) {
  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
  EmitFinalDestCopy(E->getType(), LV);
}
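
// Illustrative hazard motivating isBlockVarRef below (names hypothetical):
//
//   __block S s;
//   s = makeS(^{ use(s); });
//
// Evaluating the RHS may copy the block, which can move 's' to the heap, so
// the address of 's' must be computed only after the RHS has been emitted;
// VisitBinAssign uses this predicate to detect such LHS expressions.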

/// Is the value of the given expression possibly a reference to or
/// into a __block variable?
static bool isBlockVarRef(const Expr *E) {
  // Make sure we look through parens.
  E = E->IgnoreParens();

  // Check for a direct reference to a __block variable.
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
    return (var && var->hasAttr<BlocksAttr>());
  }

  // More complicated stuff.

  // Binary operators.
  if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
    // For an assignment or pointer-to-member operation, just care
    // about the LHS.
    if (op->isAssignmentOp() || op->isPtrMemOp())
      return isBlockVarRef(op->getLHS());

    // For a comma, just care about the RHS.
    if (op->getOpcode() == BO_Comma)
      return isBlockVarRef(op->getRHS());

    // FIXME: pointer arithmetic?
    return false;

  // Check both sides of a conditional operator.
  } else if (const AbstractConditionalOperator *op
               = dyn_cast<AbstractConditionalOperator>(E)) {
    return isBlockVarRef(op->getTrueExpr())
        || isBlockVarRef(op->getFalseExpr());

  // OVEs are required to support BinaryConditionalOperators.
  } else if (const OpaqueValueExpr *op
               = dyn_cast<OpaqueValueExpr>(E)) {
    if (const Expr *src = op->getSourceExpr())
      return isBlockVarRef(src);

  // Casts are necessary to get things like (*(int*)&var) = foo().
  // We don't really care about the kind of cast here, except
  // we don't want to look through l2r casts, because it's okay
  // to get the *value* in a __block variable.
  } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
    if (cast->getCastKind() == CK_LValueToRValue)
      return false;
    return isBlockVarRef(cast->getSubExpr());

  // Handle unary operators.  Again, just aggressively look through
  // it, ignoring the operation.
  } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
    return isBlockVarRef(uop->getSubExpr());

  // Look into the base of a field access.
  } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
    return isBlockVarRef(mem->getBase());

  // Look into the base of a subscript.
  } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
    return isBlockVarRef(sub->getBase());
  }

  return false;
}

void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");

  // If the LHS might be a __block variable, and the RHS can
  // potentially cause a block copy, we need to evaluate the RHS first
  // so that the assignment goes the right place.
  // This is pretty semantically fragile.
  if (isBlockVarRef(E->getLHS()) &&
      E->getRHS()->HasSideEffects(CGF.getContext())) {
    // Ensure that we have a destination, and evaluate the RHS into that.
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());

    // Now emit the LHS and copy into it.
    LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // That copy is an atomic copy if the LHS is atomic.
    if (LHS.getType()->isAtomicType() ||
        CGF.LValueIsSuitableForInlineAtomic(LHS)) {
      CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
      return;
    }

    EmitCopy(E->getLHS()->getType(),
             AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                                     needsGC(E->getLHS()->getType()),
                                     AggValueSlot::IsAliased,
                                     AggValueSlot::MayOverlap),
             Dest);
    return;
  }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // If we have an atomic type, evaluate into the destination and then
  // do an atomic copy.
  if (LHS.getType()->isAtomicType() ||
      CGF.LValueIsSuitableForInlineAtomic(LHS)) {
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());
    CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
    return;
  }

  // Codegen the RHS so that it stores directly into the LHS.
  AggValueSlot LHSSlot =
    AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                            needsGC(E->getLHS()->getType()),
                            AggValueSlot::IsAliased,
                            AggValueSlot::MayOverlap);
  // A non-volatile aggregate destination might have a volatile member.
  if (!LHSSlot.isVolatile() &&
      CGF.hasVolatileMember(E->getLHS()->getType()))
    LHSSlot.setVolatile(true);

  CGF.EmitAggExpr(E->getRHS(), LHSSlot);

  // Copy into the destination if the assignment isn't ignored.
  EmitFinalDestCopy(E->getType(), LHS);
}
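
// Illustrative control flow for the conditional operator below: for
//
//   S s = cond ? f() : g();
//
// both arms are emitted into the same destination slot along separate
// branches (cond.true / cond.false) that rejoin at cond.end, so the
// aggregate result needs no extra copy.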

void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
                           CGF.getProfileCount(E));

  // Save whether the destination's lifetime is externally managed.
  bool isExternallyDestructed = Dest.isExternallyDestructed();

  eval.begin(CGF);
  CGF.EmitBlock(LHSBlock);
  CGF.incrementProfileCounter(E);
  Visit(E->getTrueExpr());
  eval.end(CGF);

  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
  CGF.Builder.CreateBr(ContBlock);

  // If the result of an agg expression is unused, then the emission
  // of the LHS might need to create a destination slot.  That's fine
  // with us, and we can safely emit the RHS into the same slot, but
  // we shouldn't claim that it's already being destructed.
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Visit(E->getFalseExpr());
  eval.end(CGF);

  CGF.EmitBlock(ContBlock);
}

void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
  Visit(CE->getChosenSubExpr());
}

void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  Address ArgValue = Address::invalid();
  Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);

  // If EmitVAArg fails, emit an error.
  if (!ArgPtr.isValid()) {
    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
    return;
  }

  EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}
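
// Illustrative case for the handling below: in a call like 'use(S())' the
// CXXBindTemporaryExpr wrapping the S temporary is what schedules ~S() at
// the end of the full-expression; unless the destination slot was already
// externally destructed, the destructor is pushed after the subexpression
// has been emitted.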
1256
13.7k
void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
1257
13.7k
  // Ensure that we have a slot, but if we already do, remember
1258
13.7k
  // whether it was externally destructed.
1259
13.7k
  bool wasExternallyDestructed = Dest.isExternallyDestructed();
1260
13.7k
  EnsureDest(E->getType());
1261
13.7k
1262
13.7k
  // We're going to push a destructor if there isn't already one.
1263
13.7k
  Dest.setExternallyDestructed();
1264
13.7k
1265
13.7k
  Visit(E->getSubExpr());
1266
13.7k
1267
13.7k
  // Push that destructor we promised.
1268
13.7k
  if (!wasExternallyDestructed)
1269
1.56k
    CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
1270
13.7k
}
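
// ---- Editor's illustration (sketch, not part of CGExprAgg.cpp) ----
// Invented example: the discarded Guard temporary is wrapped in a
// CXXBindTemporaryExpr; EnsureDest creates the "agg.tmp.ensured" slot and
// EmitCXXTemporary pushes the destructor that runs at the end of the
// full-expression.
struct Guard {
  Guard() {}
  ~Guard() {}  // non-trivial destructor forces the temporary cleanup
  int payload[4];
};
Guard make() { return Guard(); }
void use() {
  make();  // result unused; destructor still runs here
}
// ----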
1271
1272
void
1273
116k
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
1274
116k
  AggValueSlot Slot = EnsureSlot(E->getType());
1275
116k
  CGF.EmitCXXConstructExpr(E, Slot);
1276
116k
}
1277
1278
void AggExprEmitter::VisitCXXInheritedCtorInitExpr(
1279
107
    const CXXInheritedCtorInitExpr *E) {
1280
107
  AggValueSlot Slot = EnsureSlot(E->getType());
1281
107
  CGF.EmitInheritedCXXConstructorCall(
1282
107
      E->getConstructor(), E->constructsVBase(), Slot.getAddress(),
1283
107
      E->inheritedFromVBase(), E);
1284
107
}
1285
1286
void
1287
1.61k
AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
1288
1.61k
  AggValueSlot Slot = EnsureSlot(E->getType());
1289
1.61k
  LValue SlotLV = CGF.MakeAddrLValue(Slot.getAddress(), E->getType());
1290
1.61k
1291
1.61k
  // We'll need to enter cleanup scopes in case any of the element
1292
1.61k
  // initializers throws an exception.
1293
1.61k
  SmallVector<EHScopeStack::stable_iterator, 16> Cleanups;
1294
1.61k
  llvm::Instruction *CleanupDominator = nullptr;
1295
1.61k
1296
1.61k
  CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
1297
1.61k
  for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(),
1298
1.61k
                                               e = E->capture_init_end();
1299
4.52k
       i != e; ++i, ++CurField) {
1300
2.90k
    // Emit initialization
1301
2.90k
    LValue LV = CGF.EmitLValueForFieldInitialization(SlotLV, *CurField);
1302
2.90k
    if (CurField->hasCapturedVLAType()) {
1303
24
      CGF.EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
1304
24
      continue;
1305
24
    }
1306
2.88k
1307
2.88k
    EmitInitializationToLValue(*i, LV);
1308
2.88k
1309
2.88k
    // Push a destructor if necessary.
1310
2.88k
    if (QualType::DestructionKind DtorKind =
1311
13
            CurField->getType().isDestructedType()) {
1312
13
      assert(LV.isSimple());
1313
13
      if (CGF.needsEHCleanup(DtorKind)) {
1314
6
        if (!CleanupDominator)
1315
5
          CleanupDominator = CGF.Builder.CreateAlignedLoad(
1316
5
              CGF.Int8Ty,
1317
5
              llvm::Constant::getNullValue(CGF.Int8PtrTy),
1318
5
              CharUnits::One()); // placeholder
1319
6
1320
6
        CGF.pushDestroy(EHCleanup, LV.getAddress(), CurField->getType(),
1321
6
                        CGF.getDestroyer(DtorKind), false);
1322
6
        Cleanups.push_back(CGF.EHStack.stable_begin());
1323
6
      }
1324
13
    }
1325
2.88k
  }
1326
1.61k
1327
1.61k
  // Deactivate all the partial cleanups in reverse order, which
1328
1.61k
  // generally means popping them.
1329
1.62k
  for (unsigned i = Cleanups.size(); i != 0; --i)
1330
6
    CGF.DeactivateCleanupBlock(Cleanups[i-1], CleanupDominator);
1331
1.61k
1332
1.61k
  // Destroy the placeholder if we made one.
1333
1.61k
  if (CleanupDominator)
1334
5
    CleanupDominator->eraseFromParent();
1335
1.61k
}
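
// ---- Editor's illustration (sketch, not part of CGExprAgg.cpp) ----
// Invented example: each capture initializes one field of the closure via
// the loop above; copying `t` needs an EH cleanup so that a throw from a
// later capture's initializer would destroy the already-constructed copy.
struct Tracker {
  Tracker() {}
  Tracker(const Tracker &) {}
  ~Tracker() {}
};
auto make_closure(int n) {
  Tracker t;
  return [t, n] { return n; };  // closure fields: a copy of t, then n
}
// ----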
1336
1337
10.0k
void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
1338
10.0k
  CGF.enterFullExpression(E);
1339
10.0k
  CodeGenFunction::RunCleanupsScope cleanups(CGF);
1340
10.0k
  Visit(E->getSubExpr());
1341
10.0k
}
1342
1343
0
void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
1344
0
  QualType T = E->getType();
1345
0
  AggValueSlot Slot = EnsureSlot(T);
1346
0
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
1347
0
}
1348
1349
18
void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
1350
18
  QualType T = E->getType();
1351
18
  AggValueSlot Slot = EnsureSlot(T);
1352
18
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
1353
18
}
1354
1355
/// isSimpleZero - If emitting this value will obviously just cause a store of
1356
/// zero to memory, return true.  This can return false if uncertain, so it just
1357
/// handles simple cases.
1358
17.8k
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
1359
17.8k
  E = E->IgnoreParens();
1360
17.8k
1361
17.8k
  // 0
1362
17.8k
  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
1363
105
    return IL->getValue() == 0;
1364
17.7k
  // +0.0
1365
17.7k
  if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
1366
10
    return FL->getValue().isPosZero();
1367
17.7k
  // int()
1368
17.7k
  if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
1369
17.7k
      CGF.getTypes().isZeroInitializable(E->getType()))
1370
235
    return true;
1371
17.5k
  // (int*)0 - Null pointer expressions.
1372
17.5k
  if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
1373
8.79k
    return ICE->getCastKind() == CK_NullToPointer &&
1374
8.79k
           CGF.getTypes().isPointerZeroInitializable(E->getType()) &&
1375
8.79k
           !E->HasSideEffects(CGF.getContext());
1376
8.70k
  // '\0'
1377
8.70k
  if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
1378
1
    return CL->getValue() == 0;
1379
8.70k
1380
8.70k
  // Otherwise, hard case: conservatively return false.
1381
8.70k
  return false;
1382
8.70k
}
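
// ---- Editor's illustration (sketch, not part of CGExprAgg.cpp) ----
// Invented example: every initializer below is a "simple zero" (0, +0.0,
// '\0', and a null-to-pointer cast), so once the destination has been
// memset to zero all of the element stores can be skipped.
struct Zeros { int i; double d; char c; int *p; };
Zeros make_zeros() { return Zeros{0, 0.0, '\0', nullptr}; }
// ----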
1383
1384
1385
void
1386
18.0k
AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
1387
18.0k
  QualType type = LV.getType();
1388
18.0k
  // FIXME: Ignore result?
1389
18.0k
  // FIXME: Are initializers affected by volatile?
1390
18.0k
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
1391
42
    // Storing "i32 0" to a zero'd memory location is a noop.
1392
42
    return;
1393
18.0k
  } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
1394
142
    return EmitNullInitializationToLValue(LV);
1395
17.8k
  } else if (isa<NoInitExpr>(E)) {
1396
14
    // Do nothing.
1397
14
    return;
1398
17.8k
  } else if (type->isReferenceType()) {
1399
2.76k
    RValue RV = CGF.EmitReferenceBindingToExpr(E);
1400
2.76k
    return CGF.EmitStoreThroughLValue(RV, LV);
1401
2.76k
  }
1402
15.0k
1403
15.0k
  switch (CGF.getEvaluationKind(type)) {
1404
15.0k
  case TEK_Complex:
1405
8
    CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
1406
8
    return;
1407
15.0k
  case TEK_Aggregate:
1408
3.54k
    CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
1409
3.54k
                                               AggValueSlot::IsDestructed,
1410
3.54k
                                      AggValueSlot::DoesNotNeedGCBarriers,
1411
3.54k
                                               AggValueSlot::IsNotAliased,
1412
3.54k
                                               AggValueSlot::MayOverlap,
1413
3.54k
                                               Dest.isZeroed()));
1414
3.54k
    return;
1415
15.0k
  case TEK_Scalar:
1416
11.5k
    if (LV.isSimple()) {
1417
11.5k
      CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
1418
11.5k
    } else {
1419
16
      CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
1420
16
    }
1421
11.5k
    return;
1422
0
  }
1423
0
  llvm_unreachable("bad evaluation kind");
1424
0
}
1425
1426
164
void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
1427
164
  QualType type = lv.getType();
1428
164
1429
164
  // If the destination slot is already zeroed out before the aggregate is
1430
164
  // copied into it, we don't have to emit any zeros here.
1431
164
  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
1432
10
    return;
1433
154
1434
154
  if (CGF.hasScalarEvaluationKind(type)) {
1435
136
    // For non-aggregates, we can store the appropriate null constant.
1436
136
    llvm::Value *null = CGF.CGM.EmitNullConstant(type);
1437
136
    // Note that the following is not equivalent to
1438
136
    // EmitStoreThroughBitfieldLValue for ARC types.
1439
136
    if (lv.isBitField()) {
1440
1
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
1441
135
    } else {
1442
135
      assert(lv.isSimple());
1443
135
      CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
1444
135
    }
1445
136
  } else {
1446
18
    // There's a potential optimization opportunity in combining
1447
18
    // memsets; that would be easy for arrays, but relatively
1448
18
    // difficult for structures with the current code.
1449
18
    CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
1450
18
  }
1451
154
}
1452
1453
6.56k
void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
1454
#if 0
1455
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
1456
  // (Length of globals? Chunks of zeroed-out space?).
1457
  //
1458
  // If we can, prefer a copy from a global; this is a lot less code for long
1459
  // globals, and it's easier for the current optimizers to analyze.
1460
  if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
1461
    llvm::GlobalVariable* GV =
1462
    new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
1463
                             llvm::GlobalValue::InternalLinkage, C, "");
1464
    EmitFinalDestCopy(E->getType(), CGF.MakeAddrLValue(GV, E->getType()));
1465
    return;
1466
  }
1467
#endif
1468
6.56k
  if (E->hadArrayRangeDesignator())
1469
0
    CGF.ErrorUnsupported(E, "GNU array range designator extension");
1470
6.56k
1471
6.56k
  if (E->isTransparent())
1472
6
    return Visit(E->getInit(0));
1473
6.55k
1474
6.55k
  AggValueSlot Dest = EnsureSlot(E->getType());
1475
6.55k
1476
6.55k
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
1477
6.55k
1478
6.55k
  // Handle initialization of an array.
1479
6.55k
  if (E->getType()->isArrayType()) {
1480
1.61k
    auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
1481
1.61k
    EmitArrayInit(Dest.getAddress(), AType, E->getType(), E);
1482
1.61k
    return;
1483
1.61k
  }
1484
4.93k
1485
4.93k
  assert(E->getType()->isRecordType() && "Only support structs/unions here!");
1486
4.93k
1487
4.93k
  // Do struct initialization; this code just sets each individual member
1488
4.93k
  // to the appropriate value.  This makes bitfield support automatic;
1489
4.93k
  // the disadvantage is that the generated code is more difficult for
1490
4.93k
  // the optimizer, especially with bitfields.
1491
4.93k
  unsigned NumInitElements = E->getNumInits();
1492
4.93k
  RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();
1493
4.93k
1494
4.93k
  // We'll need to enter cleanup scopes in case any of the element
1495
4.93k
  // initializers throws an exception.
1496
4.93k
  SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
1497
4.93k
  llvm::Instruction *cleanupDominator = nullptr;
1498
4.93k
1499
4.93k
  unsigned curInitIndex = 0;
1500
4.93k
1501
4.93k
  // Emit initialization of base classes.
1502
4.93k
  if (auto *CXXRD = dyn_cast<CXXRecordDecl>(record)) {
1503
3.16k
    assert(E->getNumInits() >= CXXRD->getNumBases() &&
1504
3.16k
           "missing initializer for base class");
1505
3.16k
    for (auto &Base : CXXRD->bases()) {
1506
12
      assert(!Base.isVirtual() && "should not see vbases here");
1507
12
      auto *BaseRD = Base.getType()->getAsCXXRecordDecl();
1508
12
      Address V = CGF.GetAddressOfDirectBaseInCompleteClass(
1509
12
          Dest.getAddress(), CXXRD, BaseRD,
1510
12
          /*isBaseVirtual*/ false);
1511
12
      AggValueSlot AggSlot = AggValueSlot::forAddr(
1512
12
          V, Qualifiers(),
1513
12
          AggValueSlot::IsDestructed,
1514
12
          AggValueSlot::DoesNotNeedGCBarriers,
1515
12
          AggValueSlot::IsNotAliased,
1516
12
          CGF.getOverlapForBaseInit(CXXRD, BaseRD, Base.isVirtual()));
1517
12
      CGF.EmitAggExpr(E->getInit(curInitIndex++), AggSlot);
1518
12
1519
12
      if (QualType::DestructionKind dtorKind =
1520
6
              Base.getType().isDestructedType()) {
1521
6
        CGF.pushDestroy(dtorKind, V, Base.getType());
1522
6
        cleanups.push_back(CGF.EHStack.stable_begin());
1523
6
      }
1524
12
    }
1525
3.16k
  }
1526
4.93k
1527
4.93k
  // Prepare a 'this' for CXXDefaultInitExprs.
1528
4.93k
  CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());
1529
4.93k
1530
4.93k
  if (record->isUnion()) {
1531
1.31k
    // Only initialize one field of a union. The field itself is
1532
1.31k
    // specified by the initializer list.
1533
1.31k
    if (!E->getInitializedFieldInUnion()) {
1534
12
      // Empty union; we have nothing to do.
1535
12
1536
#ifndef NDEBUG
1537
      // Make sure that it's really an empty union and not a failure of
1538
      // semantic analysis.
1539
      for (const auto *Field : record->fields())
1540
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
1541
#endif
1542
      return;
1543
12
    }
1544
1.30k
1545
1.30k
    // FIXME: volatility
1546
1.30k
    FieldDecl *Field = E->getInitializedFieldInUnion();
1547
1.30k
1548
1.30k
    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
1549
1.30k
    if (NumInitElements) {
1550
1.29k
      // Store the initializer into the field
1551
1.29k
      EmitInitializationToLValue(E->getInit(0), FieldLoc);
1552
1.29k
    } else {
1553
4
      // Default-initialize to null.
1554
4
      EmitNullInitializationToLValue(FieldLoc);
1555
4
    }
1556
1.30k
1557
1.30k
    return;
1558
1.30k
  }
1559
3.62k
1560
3.62k
  // Here we iterate over the fields; this makes it simpler to both
1561
3.62k
  // default-initialize fields and skip over unnamed fields.
1562
10.0k
  for (const auto *field : record->fields()) {
1563
10.0k
    // We're done once we hit the flexible array member.
1564
10.0k
    if (field->getType()->isIncompleteArrayType())
1565
0
      break;
1566
10.0k
1567
10.0k
    // Always skip anonymous bitfields.
1568
10.0k
    if (field->isUnnamedBitfield())
1569
24
      continue;
1570
10.0k
1571
10.0k
    // We're done if we reach the end of the explicit initializers, we
1572
10.0k
    // have a zeroed object, and the rest of the fields are
1573
10.0k
    // zero-initializable.
1574
10.0k
    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
1575
10.0k
        CGF.getTypes().isZeroInitializable(E->getType()))
1576
0
      break;
1577
10.0k
1578
10.0k
1579
10.0k
    LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
1580
10.0k
    // We never generate write-barriers for initialized fields.
1581
10.0k
    LV.setNonGC(true);
1582
10.0k
1583
10.0k
    if (curInitIndex < NumInitElements) {
1584
10.0k
      // Store the initializer into the field.
1585
10.0k
      EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
1586
10.0k
    } else {
1587
0
      // We're out of initializers; default-initialize to null
1588
0
      EmitNullInitializationToLValue(LV);
1589
0
    }
1590
10.0k
1591
10.0k
    // Push a destructor if necessary.
1592
10.0k
    // FIXME: if we have an array of structures, all explicitly
1593
10.0k
    // initialized, we can end up pushing a linear number of cleanups.
1594
10.0k
    bool pushedCleanup = false;
1595
10.0k
    if (QualType::DestructionKind dtorKind
1596
109
          = field->getType().isDestructedType()) {
1597
109
      assert(LV.isSimple());
1598
109
      if (CGF.needsEHCleanup(dtorKind)) {
1599
57
        if (!cleanupDominator)
1600
29
          cleanupDominator = CGF.Builder.CreateAlignedLoad(
1601
29
              CGF.Int8Ty,
1602
29
              llvm::Constant::getNullValue(CGF.Int8PtrTy),
1603
29
              CharUnits::One()); // placeholder
1604
57
1605
57
        CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
1606
57
                        CGF.getDestroyer(dtorKind), false);
1607
57
        cleanups.push_back(CGF.EHStack.stable_begin());
1608
57
        pushedCleanup = true;
1609
57
      }
1610
109
    }
1611
10.0k
1612
10.0k
    // If the GEP didn't get used because of a dead zero init or something
1613
10.0k
    // else, clean it up for -O0 builds and general tidiness.
1614
10.0k
    if (!pushedCleanup && LV.isSimple())
1615
9.99k
      if (llvm::GetElementPtrInst *GEP =
1616
9.73k
            dyn_cast<llvm::GetElementPtrInst>(LV.getPointer()))
1617
9.73k
        if (GEP->use_empty())
1618
83
          GEP->eraseFromParent();
1619
10.0k
  }
1620
3.62k
1621
3.62k
  // Deactivate all the partial cleanups in reverse order, which
1622
3.62k
  // generally means popping them.
1623
3.68k
  for (unsigned i = cleanups.size(); i != 0; --i)
1624
63
    CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);
1625
3.62k
1626
3.62k
  // Destroy the placeholder if we made one.
1627
3.62k
  if (cleanupDominator)
1628
29
    cleanupDominator->eraseFromParent();
1629
3.62k
}
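
// ---- Editor's illustration (sketch, not part of CGExprAgg.cpp) ----
// Invented example: Outer is an aggregate whose members have non-trivial
// destructors. After `a` is initialized, an EH-only cleanup is pushed so
// that a throw while constructing `b` destroys `a`; on success the
// cleanups are deactivated and the placeholder dominator is erased.
struct Inner {
  Inner() {}
  ~Inner() {}
};
struct Outer { int tag; Inner a; Inner b; };
Outer make_outer() { return Outer{1, Inner(), Inner()}; }
// ----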
1630
1631
void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
1632
28
                                            llvm::Value *outerBegin) {
1633
28
  // Emit the common subexpression.
1634
28
  CodeGenFunction::OpaqueValueMapping binding(CGF, E->getCommonExpr());
1635
28
1636
28
  Address destPtr = EnsureSlot(E->getType()).getAddress();
1637
28
  uint64_t numElements = E->getArraySize().getZExtValue();
1638
28
1639
28
  if (!numElements)
1640
0
    return;
1641
28
1642
28
  // destPtr is an array*. Construct an elementType* by drilling down a level.
1643
28
  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
1644
28
  llvm::Value *indices[] = {zero, zero};
1645
28
  llvm::Value *begin = Builder.CreateInBoundsGEP(destPtr.getPointer(), indices,
1646
28
                                                 "arrayinit.begin");
1647
28
1648
28
  // Prepare to special-case multidimensional array initialization: we avoid
1649
28
  // emitting multiple destructor loops in that case.
1650
28
  if (!outerBegin)
1651
21
    outerBegin = begin;
1652
28
  ArrayInitLoopExpr *InnerLoop = dyn_cast<ArrayInitLoopExpr>(E->getSubExpr());
1653
28
1654
28
  QualType elementType =
1655
28
      CGF.getContext().getAsArrayType(E->getType())->getElementType();
1656
28
  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
1657
28
  CharUnits elementAlign =
1658
28
      destPtr.getAlignment().alignmentOfArrayElement(elementSize);
1659
28
1660
28
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
1661
28
  llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
1662
28
1663
28
  // Jump into the body.
1664
28
  CGF.EmitBlock(bodyBB);
1665
28
  llvm::PHINode *index =
1666
28
      Builder.CreatePHI(zero->getType(), 2, "arrayinit.index");
1667
28
  index->addIncoming(zero, entryBB);
1668
28
  llvm::Value *element = Builder.CreateInBoundsGEP(begin, index);
1669
28
1670
28
  // Prepare for a cleanup.
1671
28
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
1672
28
  EHScopeStack::stable_iterator cleanup;
1673
28
  if (CGF.needsEHCleanup(dtorKind) && !InnerLoop) {
1674
5
    if (outerBegin->getType() != element->getType())
1675
1
      outerBegin = Builder.CreateBitCast(outerBegin, element->getType());
1676
5
    CGF.pushRegularPartialArrayCleanup(outerBegin, element, elementType,
1677
5
                                       elementAlign,
1678
5
                                       CGF.getDestroyer(dtorKind));
1679
5
    cleanup = CGF.EHStack.stable_begin();
1680
23
  } else {
1681
23
    dtorKind = QualType::DK_none;
1682
23
  }
1683
28
1684
28
  // Emit the actual filler expression.
1685
28
  {
1686
28
    // Temporaries created in an array initialization loop are destroyed
1687
28
    // at the end of each iteration.
1688
28
    CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
1689
28
    CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index);
1690
28
    LValue elementLV =
1691
28
        CGF.MakeAddrLValue(Address(element, elementAlign), elementType);
1692
28
1693
28
    if (InnerLoop) {
1694
7
      // If the subexpression is an ArrayInitLoopExpr, share its cleanup.
1695
7
      auto elementSlot = AggValueSlot::forLValue(
1696
7
          elementLV, AggValueSlot::IsDestructed,
1697
7
          AggValueSlot::DoesNotNeedGCBarriers,
1698
7
          AggValueSlot::IsNotAliased,
1699
7
          AggValueSlot::DoesNotOverlap);
1700
7
      AggExprEmitter(CGF, elementSlot, false)
1701
7
          .VisitArrayInitLoopExpr(InnerLoop, outerBegin);
1702
7
    } else
1703
21
      EmitInitializationToLValue(E->getSubExpr(), elementLV);
1704
28
  }
1705
28
1706
28
  // Move on to the next element.
1707
28
  llvm::Value *nextIndex = Builder.CreateNUWAdd(
1708
28
      index, llvm::ConstantInt::get(CGF.SizeTy, 1), "arrayinit.next");
1709
28
  index->addIncoming(nextIndex, Builder.GetInsertBlock());
1710
28
1711
28
  // Leave the loop if we're done.
1712
28
  llvm::Value *done = Builder.CreateICmpEQ(
1713
28
      nextIndex, llvm::ConstantInt::get(CGF.SizeTy, numElements),
1714
28
      "arrayinit.done");
1715
28
  llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
1716
28
  Builder.CreateCondBr(done, endBB, bodyBB);
1717
28
1718
28
  CGF.EmitBlock(endBB);
1719
28
1720
28
  // Leave the partial-array cleanup if we entered one.
1721
28
  if (dtorKind)
1722
5
    CGF.DeactivateCleanupBlock(cleanup, index);
1723
28
}
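
// ---- Editor's illustration (sketch, not part of CGExprAgg.cpp) ----
// Invented example: capturing an array by value builds an ArrayInitLoopExpr,
// which the code above lowers to the phi-driven arrayinit.body loop that
// copies one element per iteration.
auto copy_by_capture(int (&arr)[8]) {
  return [arr] { return arr[0]; };  // eight ints copied into the closure
}
// ----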
1724
1725
8
void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
1726
8
  AggValueSlot Dest = EnsureSlot(E->getType());
1727
8
1728
8
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
1729
8
  EmitInitializationToLValue(E->getBase(), DestLV);
1730
8
  VisitInitListExpr(E->getUpdater());
1731
8
}
1732
1733
//===----------------------------------------------------------------------===//
1734
//                        Entry Points into this File
1735
//===----------------------------------------------------------------------===//
1736
1737
/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
1738
/// non-zero bytes that will be stored when outputting the initializer for the
1739
/// specified initializer expression.
1740
17.7k
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
1741
17.7k
  E = E->IgnoreParens();
1742
17.7k
1743
17.7k
  // 0 and 0.0 won't require any non-zero stores!
1744
17.7k
  if (isSimpleZero(E, CGF)) return CharUnits::Zero();
1745
17.4k
1746
17.4k
  // If this is an initlist expr, sum up the sizes of the (present)
1747
17.4k
  // elements.  If this is something weird, assume the whole thing is non-zero.
1748
17.4k
  const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
1749
17.4k
  while (ILE && ILE->isTransparent())
1750
2
    ILE = dyn_cast<InitListExpr>(ILE->getInit(0));
1751
17.4k
  if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
1752
14.7k
    return CGF.getContext().getTypeSizeInChars(E->getType());
1753
2.67k
1754
2.67k
  // InitListExprs for structs have to be handled carefully.  If there are
1755
2.67k
  // reference members, we need to consider the size of the reference, not the
1756
2.67k
  // referent.  InitListExprs for unions and arrays can't have references.
1757
2.67k
  if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
1758
2.55k
    if (!RT->isUnionType()) {
1759
2.55k
      RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
1760
2.55k
      CharUnits NumNonZeroBytes = CharUnits::Zero();
1761
2.55k
1762
2.55k
      unsigned ILEElement = 0;
1763
2.55k
      if (auto *CXXRD = dyn_cast<CXXRecordDecl>(SD))
1764
2.38k
        while (ILEElement != CXXRD->getNumBases())
1765
0
          NumNonZeroBytes +=
1766
0
              GetNumNonZeroBytesInInit(ILE->getInit(ILEElement++), CGF);
1767
8.29k
      for (const auto *Field : SD->fields()) {
1768
8.29k
        // We're done once we hit the flexible array member or run out of
1769
8.29k
        // InitListExpr elements.
1770
8.29k
        if (Field->getType()->isIncompleteArrayType() ||
1771
8.29k
            ILEElement == ILE->getNumInits())
1772
0
          break;
1773
8.29k
        if (Field->isUnnamedBitfield())
1774
0
          continue;
1775
8.29k
1776
8.29k
        const Expr *E = ILE->getInit(ILEElement++);
1777
8.29k
1778
8.29k
        // Reference values are always non-null and have the width of a pointer.
1779
8.29k
        if (Field->getType()->isReferenceType())
1780
4
          NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
1781
4
              CGF.getTarget().getPointerWidth(0));
1782
8.28k
        else
1783
8.28k
          NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
1784
8.29k
      }
1785
2.55k
1786
2.55k
      return NumNonZeroBytes;
1787
2.55k
    }
1788
123
  }
1789
123
1790
123
1791
123
  CharUnits NumNonZeroBytes = CharUnits::Zero();
1792
571
  for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
1793
448
    NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
1794
123
  return NumNonZeroBytes;
1795
123
}
1796
1797
/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
1798
/// zeros in it, emit a memset and avoid storing the individual zeros.
1799
///
1800
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
1801
150k
                                     CodeGenFunction &CGF) {
1802
150k
  // If the slot is already known to be zeroed, nothing to do.  Don't mess with
1803
150k
  // volatile stores.
1804
150k
  if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
1805
2.32k
    return;
1806
148k
1807
148k
  // C++ objects with a user-declared constructor don't need zeroing.
1808
148k
  if (CGF.getLangOpts().CPlusPlus)
1809
140k
    if (const RecordType *RT = CGF.getContext()
1810
140k
                       .getBaseElementType(E->getType())->getAs<RecordType>()) {
1811
140k
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1812
140k
      if (RD->hasUserDeclaredConstructor())
1813
112k
        return;
1814
35.1k
    }
1815
35.1k
1816
35.1k
  // If the type is 16 bytes or smaller, prefer individual stores over memset.
1817
35.1k
  CharUnits Size = Slot.getPreferredSize(CGF.getContext(), E->getType());
1818
35.1k
  if (Size <= CharUnits::fromQuantity(16))
1819
26.1k
    return;
1820
9.02k
1821
9.02k
  // Check to see if over 3/4 of the initializer is known to be zero.  If so,
1822
9.02k
  // we prefer to emit memset + individual stores for the rest.
1823
9.02k
  CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
1824
9.02k
  if (NumNonZeroBytes*4 > Size)
1825
8.99k
    return;
1826
33
1827
33
  // Okay, it seems like a good idea to use an initial memset, emit the call.
1828
33
  llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());
1829
33
1830
33
  Address Loc = Slot.getAddress();
1831
33
  Loc = CGF.Builder.CreateElementBitCast(Loc, CGF.Int8Ty);
1832
33
  CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);
1833
33
1834
33
  // Tell the AggExprEmitter that the slot is known zero.
1835
33
  Slot.setZeroed();
1836
33
}
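
// ---- Editor's illustration (worked example; invented names) ----
// Blob is 256 bytes. GetNumNonZeroBytesInInit sees one non-zero int (4
// bytes) plus implicit zero fillers for the rest, so NumNonZeroBytes*4 = 16
// <= 256: the slot is memset to zero and only the single 42 is stored.
struct Blob { int header; int zeros[63]; };
Blob make_blob() { return Blob{42}; }
// ----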
1837
1838
1839
1840
1841
/// EmitAggExpr - Emit the computation of the specified expression of aggregate
1842
/// type.  The result is computed into the given slot.  Note that if the slot
1843
/// is ignored, the value of the aggregate expression is not needed; a
1844
/// non-ignored slot must have a valid address.
1845
150k
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
1846
150k
  assert(E && hasAggregateEvaluationKind(E->getType()) &&
1847
150k
         "Invalid aggregate expression to emit");
1848
150k
  assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
1849
150k
         "slot has bits but no address");
1850
150k
1851
150k
  // Optimize the slot if possible.
1852
150k
  CheckAggExprForMemSetUse(Slot, E, *this);
1853
150k
1854
150k
  AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr*>(E));
1855
150k
}
1856
1857
74
LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
1858
74
  assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
1859
74
  Address Temp = CreateMemTemp(E->getType());
1860
74
  LValue LV = MakeAddrLValue(Temp, E->getType());
1861
74
  EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
1862
74
                                         AggValueSlot::DoesNotNeedGCBarriers,
1863
74
                                         AggValueSlot::IsNotAliased,
1864
74
                                         AggValueSlot::DoesNotOverlap));
1865
74
  return LV;
1866
74
}
1867
1868
AggValueSlot::Overlap_t
1869
12.8k
CodeGenFunction::getOverlapForFieldInit(const FieldDecl *FD) {
1870
12.8k
  if (!FD->hasAttr<NoUniqueAddressAttr>() || !FD->getType()->isRecordType())
1871
12.8k
    return AggValueSlot::DoesNotOverlap;
1872
2
1873
2
  // If the field lies entirely within the enclosing class's nvsize, its tail
1874
2
  // padding cannot overlap any already-initialized object. (The only subobjects
1875
2
  // with greater addresses that might already be initialized are vbases.)
1876
2
  const RecordDecl *ClassRD = FD->getParent();
1877
2
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassRD);
1878
2
  if (Layout.getFieldOffset(FD->getFieldIndex()) +
1879
2
          getContext().getTypeSize(FD->getType()) <=
1880
2
      (uint64_t)getContext().toBits(Layout.getNonVirtualSize()))
1881
0
    return AggValueSlot::DoesNotOverlap;
1882
2
1883
2
  // The tail padding may contain values we need to preserve.
1884
2
  return AggValueSlot::MayOverlap;
1885
2
}
1886
1887
AggValueSlot::Overlap_t CodeGenFunction::getOverlapForBaseInit(
1888
14.5k
    const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual) {
1889
14.5k
  // If the most-derived object is a field declared with [[no_unique_address]],
1890
14.5k
  // the tail padding of any virtual base could be reused for other subobjects
1891
14.5k
  // of that field's class.
1892
14.5k
  if (IsVirtual)
1893
856
    return AggValueSlot::MayOverlap;
1894
13.7k
1895
13.7k
  // If the base class is laid out entirely within the nvsize of the derived
1896
13.7k
  // class, its tail padding cannot yet be initialized, so we can issue
1897
13.7k
  // stores at the full width of the base class.
1898
13.7k
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
1899
13.7k
  if (Layout.getBaseClassOffset(BaseRD) +
1900
13.7k
          getContext().getASTRecordLayout(BaseRD).getSize() <=
1901
13.7k
      Layout.getNonVirtualSize())
1902
13.3k
    return AggValueSlot::DoesNotOverlap;
1903
399
1904
399
  // The tail padding may contain values we need to preserve.
1905
399
  return AggValueSlot::MayOverlap;
1906
399
}
1907
1908
void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
1909
                                        AggValueSlot::Overlap_t MayOverlap,
1910
23.4k
                                        bool isVolatile) {
1911
23.4k
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
1912
23.4k
1913
23.4k
  Address DestPtr = Dest.getAddress();
1914
23.4k
  Address SrcPtr = Src.getAddress();
1915
23.4k
1916
23.4k
  if (getLangOpts().CPlusPlus) {
1917
19.7k
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
1918
18.7k
      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
1919
18.7k
      assert((Record->hasTrivialCopyConstructor() ||
1920
18.7k
              Record->hasTrivialCopyAssignment() ||
1921
18.7k
              Record->hasTrivialMoveConstructor() ||
1922
18.7k
              Record->hasTrivialMoveAssignment() ||
1923
18.7k
              Record->isUnion()) &&
1924
18.7k
             "Trying to aggregate-copy a type without a trivial copy/move "
1925
18.7k
             "constructor or assignment operator");
1926
18.7k
      // Ignore empty classes in C++.
1927
18.7k
      if (Record->isEmpty())
1928
3.72k
        return;
1929
19.7k
    }
1930
19.7k
  }
1931
19.7k
1932
19.7k
  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
1933
19.7k
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
1934
19.7k
  // read from another object that overlaps in anyway the storage of the first
1935
19.7k
  // object, then the overlap shall be exact and the two objects shall have
1936
19.7k
  // qualified or unqualified versions of a compatible type."
1937
19.7k
  //
1938
19.7k
  // memcpy is not defined if the source and destination pointers are exactly
1939
19.7k
  // equal, but other compilers do this optimization, and almost every memcpy
1940
19.7k
  // implementation handles this case safely.  If there is a libc that does not
1941
19.7k
  // safely handle this, we can add a target hook.
1942
19.7k
1943
19.7k
  // Get data size info for this aggregate. Don't copy the tail padding if this
1944
19.7k
  // might be a potentially-overlapping subobject, since the tail padding might
1945
19.7k
  // be occupied by a different object. Otherwise, copying it is fine.
1946
19.7k
  std::pair<CharUnits, CharUnits> TypeInfo;
1947
19.7k
  if (MayOverlap)
1948
7.16k
    TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
1949
12.5k
  else
1950
12.5k
    TypeInfo = getContext().getTypeInfoInChars(Ty);
1951
19.7k
1952
19.7k
  llvm::Value *SizeVal = nullptr;
1953
19.7k
  if (TypeInfo.first.isZero()) {
1954
94
    // But note that getTypeInfo returns 0 for a VLA.
1955
94
    if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
1956
40
            getContext().getAsArrayType(Ty))) {
1957
40
      QualType BaseEltTy;
1958
40
      SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
1959
40
      TypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
1960
40
      assert(!TypeInfo.first.isZero());
1961
40
      SizeVal = Builder.CreateNUWMul(
1962
40
          SizeVal,
1963
40
          llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity()));
1964
40
    }
1965
94
  }
1966
19.7k
  if (!SizeVal) {
1967
19.6k
    SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity());
1968
19.6k
  }
1969
19.7k
1970
19.7k
  // FIXME: If we have a volatile struct, the optimizer can remove what might
1971
19.7k
  // appear to be `extra' memory ops:
1972
19.7k
  //
1973
19.7k
  // volatile struct { int i; } a, b;
1974
19.7k
  //
1975
19.7k
  // int main() {
1976
19.7k
  //   a = b;
1977
19.7k
  //   a = b;
1978
19.7k
  // }
1979
19.7k
  //
1980
19.7k
  // we need to use a different call here.  We use isVolatile to indicate when
1981
19.7k
  // either the source or the destination is volatile.
1982
19.7k
1983
19.7k
  DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
1984
19.7k
  SrcPtr = Builder.CreateElementBitCast(SrcPtr, Int8Ty);
1985
19.7k
1986
19.7k
  // Don't do any of the memmove_collectable tests if GC isn't set.
1987
19.7k
  if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
1988
19.7k
    // fall through
1989
19.7k
  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
1990
28
    RecordDecl *Record = RecordTy->getDecl();
1991
28
    if (Record->hasObjectMember()) {
1992
23
      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
1993
23
                                                    SizeVal);
1994
23
      return;
1995
23
    }
1996
2
  } else if (Ty->isArrayType()) {
1997
2
    QualType BaseType = getContext().getBaseElementType(Ty);
1998
2
    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
1999
1
      if (RecordTy->getDecl()->hasObjectMember()) {
2000
1
        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
2001
1
                                                      SizeVal);
2002
1
        return;
2003
1
      }
2004
19.7k
    }
2005
2
  }
2006
19.7k
2007
19.7k
  auto Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile);
2008
19.7k
2009
19.7k
  // Determine the metadata to describe the position of any padding in this
2010
19.7k
  // memcpy, as well as the TBAA tags for the members of the struct, in case
2011
19.7k
  // the optimizer wishes to expand it in to scalar memory operations.
2012
19.7k
  if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty))
2013
11.6k
    Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag);
2014
19.7k
2015
19.7k
  if (CGM.getCodeGenOpts().NewStructPathTBAA) {
2016
7
    TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForMemoryTransfer(
2017
7
        Dest.getTBAAInfo(), Src.getTBAAInfo());
2018
7
    CGM.DecorateInstructionWithTBAA(Inst, TBAAInfo);
2019
7
  }
2020
19.7k
}
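
// ---- Editor's illustration (sketch, not part of CGExprAgg.cpp) ----
// Invented example: a trivial copy assignment of a 40-byte struct is
// emitted through EmitAggregateCopy above, producing an llvm.memcpy of the
// i8*-cast addresses, tagged with tbaa.struct metadata when available.
struct Packet { int id; double payload[4]; };
void forward(Packet &dst, const Packet &src) { dst = src; }
// ----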