Coverage Report

Created: 2020-09-22 08:39

/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp
Line | Count | Source  (each listed source line is preceded by its line number and, where present, its execution count)
1
//===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
//
9
// This provides C++ code generation targeting the Itanium C++ ABI.  The class
10
// in this file generates structures that follow the Itanium C++ ABI, which is
11
// documented at:
12
//  https://itanium-cxx-abi.github.io/cxx-abi/abi.html
13
//  https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html
14
//
15
// It also supports the closely-related ARM ABI, documented at:
16
// https://developer.arm.com/documentation/ihi0041/g/
17
//
18
//===----------------------------------------------------------------------===//
19
20
#include "CGCXXABI.h"
21
#include "CGCleanup.h"
22
#include "CGRecordLayout.h"
23
#include "CGVTables.h"
24
#include "CodeGenFunction.h"
25
#include "CodeGenModule.h"
26
#include "TargetInfo.h"
27
#include "clang/AST/Attr.h"
28
#include "clang/AST/Mangle.h"
29
#include "clang/AST/StmtCXX.h"
30
#include "clang/AST/Type.h"
31
#include "clang/CodeGen/ConstantInitBuilder.h"
32
#include "llvm/IR/DataLayout.h"
33
#include "llvm/IR/GlobalValue.h"
34
#include "llvm/IR/Instructions.h"
35
#include "llvm/IR/Intrinsics.h"
36
#include "llvm/IR/Value.h"
37
#include "llvm/Support/ScopedPrinter.h"
38
39
using namespace clang;
40
using namespace CodeGen;
41
42
namespace {
43
class ItaniumCXXABI : public CodeGen::CGCXXABI {
44
  /// VTables - All the vtables which have been defined.
45
  llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
46
47
  /// All the thread wrapper functions that have been used.
48
  llvm::SmallVector<std::pair<const VarDecl *, llvm::Function *>, 8>
49
      ThreadWrappers;
50
51
protected:
52
  bool UseARMMethodPtrABI;
53
  bool UseARMGuardVarABI;
54
  bool Use32BitVTableOffsetABI;
55
56
9.98k
  ItaniumMangleContext &getMangleContext() {
57
9.98k
    return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
58
9.98k
  }
59
60
public:
61
  ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
62
                bool UseARMMethodPtrABI = false,
63
                bool UseARMGuardVarABI = false) :
64
    CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
65
    UseARMGuardVarABI(UseARMGuardVarABI),
66
29.0k
    Use32BitVTableOffsetABI(false) { }
67
68
  bool classifyReturnType(CGFunctionInfo &FI) const override;
69
70
121k
  RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
71
    // If C++ prohibits us from making a copy, pass by address.
72
121k
    if (!RD->canPassInRegisters())
73
12.4k
      return RAA_Indirect;
74
108k
    return RAA_Default;
75
108k
  }
76
77
991
  bool isThisCompleteObject(GlobalDecl GD) const override {
78
    // The Itanium ABI has separate complete-object vs.  base-object
79
    // variants of both constructors and destructors.
80
991
    if (isa<CXXDestructorDecl>(GD.getDecl())) {
81
378
      switch (GD.getDtorType()) {
82
277
      case Dtor_Complete:
83
277
      case Dtor_Deleting:
84
277
        return true;
85
86
101
      case Dtor_Base:
87
101
        return false;
88
89
0
      case Dtor_Comdat:
90
0
        llvm_unreachable("emitting dtor comdat as function?");
91
0
      }
92
0
      llvm_unreachable("bad dtor kind");
93
0
    }
94
613
    if (isa<CXXConstructorDecl>(GD.getDecl())) {
95
359
      switch (GD.getCtorType()) {
96
209
      case Ctor_Complete:
97
209
        return true;
98
99
150
      case Ctor_Base:
100
150
        return false;
101
102
0
      case Ctor_CopyingClosure:
103
0
      case Ctor_DefaultClosure:
104
0
        llvm_unreachable("closure ctors in Itanium ABI?");
105
106
0
      case Ctor_Comdat:
107
0
        llvm_unreachable("emitting ctor comdat as function?");
108
0
      }
109
0
      llvm_unreachable("bad dtor kind");
110
0
    }
111
112
    // No other kinds.
113
254
    return false;
114
254
  }
115
116
  bool isZeroInitializable(const MemberPointerType *MPT) override;
117
118
  llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
119
120
  CGCallee
121
    EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
122
                                    const Expr *E,
123
                                    Address This,
124
                                    llvm::Value *&ThisPtrForCall,
125
                                    llvm::Value *MemFnPtr,
126
                                    const MemberPointerType *MPT) override;
127
128
  llvm::Value *
129
    EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
130
                                 Address Base,
131
                                 llvm::Value *MemPtr,
132
                                 const MemberPointerType *MPT) override;
133
134
  llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
135
                                           const CastExpr *E,
136
                                           llvm::Value *Src) override;
137
  llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
138
                                              llvm::Constant *Src) override;
139
140
  llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
141
142
  llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
143
  llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
144
                                        CharUnits offset) override;
145
  llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
146
  llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
147
                                     CharUnits ThisAdjustment);
148
149
  llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
150
                                           llvm::Value *L, llvm::Value *R,
151
                                           const MemberPointerType *MPT,
152
                                           bool Inequality) override;
153
154
  llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
155
                                         llvm::Value *Addr,
156
                                         const MemberPointerType *MPT) override;
157
158
  void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
159
                               Address Ptr, QualType ElementType,
160
                               const CXXDestructorDecl *Dtor) override;
161
162
  void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
163
  void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
164
165
  void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
166
167
  llvm::CallInst *
168
  emitTerminateForUnexpectedException(CodeGenFunction &CGF,
169
                                      llvm::Value *Exn) override;
170
171
  void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
172
  llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
173
  CatchTypeInfo
174
  getAddrOfCXXCatchHandlerType(QualType Ty,
175
160
                               QualType CatchHandlerType) override {
176
160
    return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
177
160
  }
178
179
  bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
180
  void EmitBadTypeidCall(CodeGenFunction &CGF) override;
181
  llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
182
                          Address ThisPtr,
183
                          llvm::Type *StdTypeInfoPtrTy) override;
184
185
  bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
186
                                          QualType SrcRecordTy) override;
187
188
  llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
189
                                   QualType SrcRecordTy, QualType DestTy,
190
                                   QualType DestRecordTy,
191
                                   llvm::BasicBlock *CastEnd) override;
192
193
  llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
194
                                     QualType SrcRecordTy,
195
                                     QualType DestTy) override;
196
197
  bool EmitBadCastCall(CodeGenFunction &CGF) override;
198
199
  llvm::Value *
200
    GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
201
                              const CXXRecordDecl *ClassDecl,
202
                              const CXXRecordDecl *BaseClassDecl) override;
203
204
  void EmitCXXConstructors(const CXXConstructorDecl *D) override;
205
206
  AddedStructorArgCounts
207
  buildStructorSignature(GlobalDecl GD,
208
                         SmallVectorImpl<CanQualType> &ArgTys) override;
209
210
  bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
211
17.1k
                              CXXDtorType DT) const override {
212
    // Itanium does not emit any destructor variant as an inline thunk.
213
    // Delegating may occur as an optimization, but all variants are either
214
    // emitted with external linkage or as linkonce if they are inline and used.
215
17.1k
    return false;
216
17.1k
  }
217
218
  void EmitCXXDestructors(const CXXDestructorDecl *D) override;
219
220
  void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
221
                                 FunctionArgList &Params) override;
222
223
  void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
224
225
  AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF,
226
                                               const CXXConstructorDecl *D,
227
                                               CXXCtorType Type,
228
                                               bool ForVirtualBase,
229
                                               bool Delegating) override;
230
231
  llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF,
232
                                             const CXXDestructorDecl *DD,
233
                                             CXXDtorType Type,
234
                                             bool ForVirtualBase,
235
                                             bool Delegating) override;
236
237
  void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
238
                          CXXDtorType Type, bool ForVirtualBase,
239
                          bool Delegating, Address This,
240
                          QualType ThisTy) override;
241
242
  void emitVTableDefinitions(CodeGenVTables &CGVT,
243
                             const CXXRecordDecl *RD) override;
244
245
  bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
246
                                           CodeGenFunction::VPtr Vptr) override;
247
248
2.07k
  bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
249
2.07k
    return true;
250
2.07k
  }
251
252
  llvm::Constant *
253
  getVTableAddressPoint(BaseSubobject Base,
254
                        const CXXRecordDecl *VTableClass) override;
255
256
  llvm::Value *getVTableAddressPointInStructor(
257
      CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
258
      BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
259
260
  llvm::Value *getVTableAddressPointInStructorWithVTT(
261
      CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
262
      BaseSubobject Base, const CXXRecordDecl *NearestVBase);
263
264
  llvm::Constant *
265
  getVTableAddressPointForConstExpr(BaseSubobject Base,
266
                                    const CXXRecordDecl *VTableClass) override;
267
268
  llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
269
                                        CharUnits VPtrOffset) override;
270
271
  CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
272
                                     Address This, llvm::Type *Ty,
273
                                     SourceLocation Loc) override;
274
275
  llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
276
                                         const CXXDestructorDecl *Dtor,
277
                                         CXXDtorType DtorType, Address This,
278
                                         DeleteOrMemberCallExpr E) override;
279
280
  void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
281
282
  bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
283
  bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;
284
285
  void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
286
369
                       bool ReturnAdjustment) override {
287
    // Allow inlining of thunks by emitting them with available_externally
288
    // linkage together with vtables when needed.
289
369
    if (ForVTable && !Thunk->hasLocalLinkage())
290
33
      Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
291
369
    CGM.setGVProperties(Thunk, GD);
292
369
  }
293
294
369
  bool exportThunk() override { return true; }
295
296
  llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
297
                                     const ThisAdjustment &TA) override;
298
299
  llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
300
                                       const ReturnAdjustment &RA) override;
301
302
  size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
303
231
                              FunctionArgList &Args) const override {
304
231
    assert(!Args.empty() && "expected the arglist to not be empty!");
305
231
    return Args.size() - 1;
306
231
  }
307
308
58
  StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
309
  StringRef GetDeletedVirtualCallName() override
310
3
    { return "__cxa_deleted_virtual"; }
311
312
  CharUnits getArrayCookieSizeImpl(QualType elementType) override;
313
  Address InitializeArrayCookie(CodeGenFunction &CGF,
314
                                Address NewPtr,
315
                                llvm::Value *NumElements,
316
                                const CXXNewExpr *expr,
317
                                QualType ElementType) override;
318
  llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
319
                                   Address allocPtr,
320
                                   CharUnits cookieSize) override;
321
322
  void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
323
                       llvm::GlobalVariable *DeclPtr,
324
                       bool PerformInit) override;
325
  void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
326
                          llvm::FunctionCallee dtor,
327
                          llvm::Constant *addr) override;
328
329
  llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
330
                                                llvm::Value *Val);
331
  void EmitThreadLocalInitFuncs(
332
      CodeGenModule &CGM,
333
      ArrayRef<const VarDecl *> CXXThreadLocals,
334
      ArrayRef<llvm::Function *> CXXThreadLocalInits,
335
      ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
336
337
  /// Determine whether we will definitely emit this variable with a constant
338
  /// initializer, either because the language semantics demand it or because
339
  /// we know that the initializer is a constant.
340
442
  bool isEmittedWithConstantInitializer(const VarDecl *VD) const {
341
442
    VD = VD->getMostRecentDecl();
342
442
    if (VD->hasAttr<ConstInitAttr>())
343
9
      return true;
344
345
    // All later checks examine the initializer specified on the variable. If
346
    // the variable is weak, such examination would not be correct.
347
433
    if (VD->isWeak() || VD->hasAttr<SelectAnyAttr>())
348
0
      return false;
349
350
433
    const VarDecl *InitDecl = VD->getInitializingDeclaration();
351
433
    if (!InitDecl)
352
150
      return false;
353
354
    // If there's no initializer to run, this is constant initialization.
355
283
    if (!InitDecl->hasInit())
356
39
      return true;
357
358
    // If we have the only definition, we don't need a thread wrapper if we
359
    // will emit the value as a constant.
360
244
    if (isUniqueGVALinkage(getContext().GetGVALinkageForVariable(VD)))
361
164
      return !VD->needsDestruction(getContext()) && InitDecl->evaluateValue();
362
363
    // Otherwise, we need a thread wrapper unless we know that every
364
    // translation unit will emit the value as a constant. We rely on
365
    // ICE-ness not varying between translation units, which isn't actually
366
    // guaranteed by the standard but is necessary for sanity.
367
80
    return InitDecl->isInitKnownICE() && InitDecl->isInitICE();
368
80
  }
369
370
442
  bool usesThreadWrapperFunction(const VarDecl *VD) const override {
371
442
    return !isEmittedWithConstantInitializer(VD) ||
372
125
           VD->needsDestruction(getContext());
373
442
  }
374
  LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
375
                                      QualType LValType) override;
376
377
  bool NeedsVTTParameter(GlobalDecl GD) override;
378
379
  /**************************** RTTI Uniqueness ******************************/
380
381
protected:
382
  /// Returns true if the ABI requires RTTI type_info objects to be unique
383
  /// across a program.
384
4.62k
  virtual bool shouldRTTIBeUnique() const { return true; }
385
386
public:
387
  /// What sort of unique-RTTI behavior should we use?
388
  enum RTTIUniquenessKind {
389
    /// We are guaranteeing, or need to guarantee, that the RTTI string
390
    /// is unique.
391
    RUK_Unique,
392
393
    /// We are not guaranteeing uniqueness for the RTTI string, so we
394
    /// can demote to hidden visibility but must use string comparisons.
395
    RUK_NonUniqueHidden,
396
397
    /// We are not guaranteeing uniqueness for the RTTI string, so we
398
    /// have to use string comparisons, but we also have to emit it with
399
    /// non-hidden visibility.
400
    RUK_NonUniqueVisible
401
  };
402
403
  /// Return the required visibility status for the given type and linkage in
404
  /// the current ABI.
405
  RTTIUniquenessKind
406
  classifyRTTIUniqueness(QualType CanTy,
407
                         llvm::GlobalValue::LinkageTypes Linkage) const;
408
  friend class ItaniumRTTIBuilder;
409
410
  void emitCXXStructor(GlobalDecl GD) override;
411
412
  std::pair<llvm::Value *, const CXXRecordDecl *>
413
  LoadVTablePtr(CodeGenFunction &CGF, Address This,
414
                const CXXRecordDecl *RD) override;
415
416
 private:
417
359
   bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
418
359
     const auto &VtableLayout =
419
359
         CGM.getItaniumVTableContext().getVTableLayout(RD);
420
421
1.50k
     for (const auto &VtableComponent : VtableLayout.vtable_components()) {
422
       // Skip empty slot.
423
1.50k
       if (!VtableComponent.isUsedFunctionPointerKind())
424
889
         continue;
425
426
613
       const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
427
613
       if (!Method->getCanonicalDecl()->isInlined())
428
453
         continue;
429
430
160
       StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
431
160
       auto *Entry = CGM.GetGlobalValue(Name);
432
       // This checks if virtual inline function has already been emitted.
433
       // Note that it is possible that this inline function would be emitted
434
       // after trying to emit vtable speculatively. Because of this we do
435
       // an extra pass after emitting all deferred vtables to find and emit
436
       // these vtables opportunistically.
437
160
       if (!Entry || Entry->isDeclaration())
438
149
         return true;
439
160
     }
440
210
     return false;
441
359
  }
442
443
430
  bool isVTableHidden(const CXXRecordDecl *RD) const {
444
430
    const auto &VtableLayout =
445
430
            CGM.getItaniumVTableContext().getVTableLayout(RD);
446
447
2.06k
    for (const auto &VtableComponent : VtableLayout.vtable_components()) {
448
2.06k
      if (VtableComponent.isRTTIKind()) {
449
475
        const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
450
475
        if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
451
12
          return true;
452
1.59k
      } else if (VtableComponent.isUsedFunctionPointerKind()) {
453
969
        const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
454
969
        if (Method->getVisibility() == Visibility::HiddenVisibility &&
455
7
            !Method->isDefined())
456
7
          return true;
457
969
      }
458
2.06k
    }
459
411
    return false;
460
430
  }
461
};
462
463
class ARMCXXABI : public ItaniumCXXABI {
464
public:
465
  ARMCXXABI(CodeGen::CodeGenModule &CGM) :
466
    ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
467
641
                  /*UseARMGuardVarABI=*/true) {}
468
469
16.2k
  bool HasThisReturn(GlobalDecl GD) const override {
470
16.2k
    return (isa<CXXConstructorDecl>(GD.getDecl()) || (
471
14.5k
              isa<CXXDestructorDecl>(GD.getDecl()) &&
472
1.27k
              GD.getDtorType() != Dtor_Deleting));
473
16.2k
  }
474
475
  void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
476
                           QualType ResTy) override;
477
478
  CharUnits getArrayCookieSizeImpl(QualType elementType) override;
479
  Address InitializeArrayCookie(CodeGenFunction &CGF,
480
                                Address NewPtr,
481
                                llvm::Value *NumElements,
482
                                const CXXNewExpr *expr,
483
                                QualType ElementType) override;
484
  llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
485
                                   CharUnits cookieSize) override;
486
};
487
488
class iOS64CXXABI : public ARMCXXABI {
489
public:
490
82
  iOS64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
491
82
    Use32BitVTableOffsetABI = true;
492
82
  }
493
494
  // ARM64 libraries are prepared for non-unique RTTI.
495
40
  bool shouldRTTIBeUnique() const override { return false; }
496
};
497
498
class FuchsiaCXXABI final : public ItaniumCXXABI {
499
public:
500
  explicit FuchsiaCXXABI(CodeGen::CodeGenModule &CGM)
501
34
      : ItaniumCXXABI(CGM) {}
502
503
private:
504
774
  bool HasThisReturn(GlobalDecl GD) const override {
505
774
    return isa<CXXConstructorDecl>(GD.getDecl()) ||
506
508
           (isa<CXXDestructorDecl>(GD.getDecl()) &&
507
284
            GD.getDtorType() != Dtor_Deleting);
508
774
  }
509
};
510
511
class WebAssemblyCXXABI final : public ItaniumCXXABI {
512
public:
513
  explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
514
      : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
515
43
                      /*UseARMGuardVarABI=*/true) {}
516
  void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
517
518
private:
519
1.14k
  bool HasThisReturn(GlobalDecl GD) const override {
520
1.14k
    return isa<CXXConstructorDecl>(GD.getDecl()) ||
521
1.05k
           (isa<CXXDestructorDecl>(GD.getDecl()) &&
522
445
            GD.getDtorType() != Dtor_Deleting);
523
1.14k
  }
524
1
  bool canCallMismatchedFunctionType() const override { return false; }
525
};
526
527
class XLCXXABI final : public ItaniumCXXABI {
528
public:
529
  explicit XLCXXABI(CodeGen::CodeGenModule &CGM)
530
34
      : ItaniumCXXABI(CGM) {}
531
532
  void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
533
                          llvm::FunctionCallee dtor,
534
                          llvm::Constant *addr) override;
535
536
26
  bool useSinitAndSterm() const override { return true; }
537
538
private:
539
  void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
540
                             llvm::Constant *addr);
541
};
542
}
543
544
29.0k
CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
545
29.0k
  switch (CGM.getTarget().getCXXABI().getKind()) {
546
  // For IR-generation purposes, there's no significant difference
547
  // between the ARM and iOS ABIs.
548
559
  case TargetCXXABI::GenericARM:
549
559
  case TargetCXXABI::iOS:
550
559
  case TargetCXXABI::WatchOS:
551
559
    return new ARMCXXABI(CGM);
552
553
82
  case TargetCXXABI::iOS64:
554
82
    return new iOS64CXXABI(CGM);
555
556
34
  case TargetCXXABI::Fuchsia:
557
34
    return new FuchsiaCXXABI(CGM);
558
559
  // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
560
  // include the other 32-bit ARM oddities: constructor/destructor return values
561
  // and array cookies.
562
1.32k
  case TargetCXXABI::GenericAArch64:
563
1.32k
    return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
564
1.32k
                             /*UseARMGuardVarABI=*/true);
565
566
161
  case TargetCXXABI::GenericMIPS:
567
161
    return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
568
569
43
  case TargetCXXABI::WebAssembly:
570
43
    return new WebAssemblyCXXABI(CGM);
571
572
34
  case TargetCXXABI::XL:
573
34
    return new XLCXXABI(CGM);
574
575
26.8k
  case TargetCXXABI::GenericItanium:
576
26.8k
    if (CGM.getContext().getTargetInfo().getTriple().getArch()
577
14
        == llvm::Triple::le32) {
578
      // For PNaCl, use ARM-style method pointers so that PNaCl code
579
      // does not assume anything about the alignment of function
580
      // pointers.
581
14
      return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
582
14
    }
583
26.8k
    return new ItaniumCXXABI(CGM);
584
585
0
  case TargetCXXABI::Microsoft:
586
0
    llvm_unreachable("Microsoft ABI is not Itanium-based");
587
0
  }
588
0
  llvm_unreachable("bad ABI kind");
589
0
}
590
591
llvm::Type *
592
424
ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
593
424
  if (MPT->isMemberDataPointer())
594
85
    return CGM.PtrDiffTy;
595
339
  return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
596
339
}
597
598
/// In the Itanium and ARM ABIs, method pointers have the form:
599
///   struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
600
///
601
/// In the Itanium ABI:
602
///  - method pointers are virtual if (memptr.ptr & 1) is nonzero
603
///  - the this-adjustment is (memptr.adj)
604
///  - the virtual offset is (memptr.ptr - 1)
605
///
606
/// In the ARM ABI:
607
///  - method pointers are virtual if (memptr.adj & 1) is nonzero
608
///  - the this-adjustment is (memptr.adj >> 1)
609
///  - the virtual offset is (memptr.ptr)
610
/// ARM uses 'adj' for the virtual flag because Thumb functions
611
/// may be only single-byte aligned.
612
///
613
/// If the member is virtual, the adjusted 'this' pointer points
614
/// to a vtable pointer from which the virtual offset is applied.
615
///
616
/// If the member is non-virtual, memptr.ptr is the address of
617
/// the function to call.
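For illustration, here is a minimal sketch (not part of this file) of the decoding rules described above, for the Itanium (non-ARM) variant; the struct layout follows the comment, while the type and function names are hypothetical:

#include <cstddef>

struct ItaniumMemFnPtr { std::ptrdiff_t ptr; std::ptrdiff_t adj; };

// Resolve the raw function address for a call through 'memptr' on 'object',
// following the rules spelled out in the comment above.
inline void *decodeItaniumMemFnPtr(ItaniumMemFnPtr memptr, void *object) {
  // Apply the this-adjustment first; the caller would also use this adjusted
  // pointer as the 'this' argument of the call.
  char *adjusted = static_cast<char *>(object) + memptr.adj;
  if (memptr.ptr & 1) {
    // Virtual: the adjusted 'this' begins with a vtable pointer, and
    // (memptr.ptr - 1) is a byte offset into that vtable.
    const char *vtable = *reinterpret_cast<const char *const *>(adjusted);
    return *reinterpret_cast<void *const *>(vtable + (memptr.ptr - 1));
  }
  // Non-virtual: memptr.ptr is the address of the function itself.
  return reinterpret_cast<void *>(memptr.ptr);
}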
618
CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
619
    CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
620
    llvm::Value *&ThisPtrForCall,
621
89
    llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
622
89
  CGBuilderTy &Builder = CGF.Builder;
623
624
89
  const FunctionProtoType *FPT =
625
89
    MPT->getPointeeType()->getAs<FunctionProtoType>();
626
89
  auto *RD =
627
89
      cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());
628
629
89
  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
630
89
      CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
631
632
89
  llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
633
634
89
  llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
635
89
  llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
636
89
  llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
637
638
  // Extract memptr.adj, which is in the second field.
639
89
  llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
640
641
  // Compute the true adjustment.
642
89
  llvm::Value *Adj = RawAdj;
643
89
  if (UseARMMethodPtrABI)
644
19
    Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
645
646
  // Apply the adjustment and cast back to the original struct type
647
  // for consistency.
648
89
  llvm::Value *This = ThisAddr.getPointer();
649
89
  llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
650
89
  Ptr = Builder.CreateInBoundsGEP(Ptr, Adj);
651
89
  This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
652
89
  ThisPtrForCall = This;
653
654
  // Load the function pointer.
655
89
  llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
656
657
  // If the LSB in the function pointer is 1, the function pointer points to
658
  // a virtual function.
659
89
  llvm::Value *IsVirtual;
660
89
  if (UseARMMethodPtrABI)
661
19
    IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
662
70
  else
663
70
    IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
664
89
  IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
665
89
  Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
666
667
  // In the virtual path, the adjustment left 'This' pointing to the
668
  // vtable of the correct base subobject.  The "function pointer" is an
669
  // offset within the vtable (+1 for the virtual flag on non-ARM).
670
89
  CGF.EmitBlock(FnVirtual);
671
672
  // Cast the adjusted this to a pointer to vtable pointer and load.
673
89
  llvm::Type *VTableTy = Builder.getInt8PtrTy();
674
89
  CharUnits VTablePtrAlign =
675
89
    CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
676
89
                                      CGF.getPointerAlign());
677
89
  llvm::Value *VTable =
678
89
    CGF.GetVTablePtr(Address(This, VTablePtrAlign), VTableTy, RD);
679
680
  // Apply the offset.
681
  // On ARM64, to reserve extra space in virtual member function pointers,
682
  // we only pay attention to the low 32 bits of the offset.
683
89
  llvm::Value *VTableOffset = FnAsInt;
684
89
  if (!UseARMMethodPtrABI)
685
70
    VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
686
89
  if (Use32BitVTableOffsetABI) {
687
2
    VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
688
2
    VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
689
2
  }
690
691
  // Check the address of the function pointer if CFI on member function
692
  // pointers is enabled.
693
89
  llvm::Constant *CheckSourceLocation;
694
89
  llvm::Constant *CheckTypeDesc;
695
89
  bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) &&
696
3
                            CGM.HasHiddenLTOVisibility(RD);
697
89
  bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination &&
698
1
                           CGM.HasHiddenLTOVisibility(RD);
699
89
  bool ShouldEmitWPDInfo =
700
89
      CGM.getCodeGenOpts().WholeProgramVTables &&
701
      // Don't insert type tests if we are forcing public std visibility.
702
2
      !CGM.HasLTOVisibilityPublicStd(RD);
703
89
  llvm::Value *VirtualFn = nullptr;
704
705
89
  {
706
89
    CodeGenFunction::SanitizerScope SanScope(&CGF);
707
89
    llvm::Value *TypeId = nullptr;
708
89
    llvm::Value *CheckResult = nullptr;
709
710
89
    if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) {
711
      // If doing CFI, VFE or WPD, we will need the metadata node to check
712
      // against.
713
4
      llvm::Metadata *MD =
714
4
          CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
715
4
      TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
716
4
    }
717
718
89
    if (ShouldEmitVFEInfo) {
719
1
      llvm::Value *VFPAddr = Builder.CreateGEP(VTable, VTableOffset);
720
721
      // If doing VFE, load from the vtable with a type.checked.load intrinsic
722
      // call. Note that we use the GEP to calculate the address to load from
723
      // and pass 0 as the offset to the intrinsic. This is because every
724
      // vtable slot of the correct type is marked with matching metadata, and
725
      // we know that the load must be from one of these slots.
726
1
      llvm::Value *CheckedLoad = Builder.CreateCall(
727
1
          CGM.getIntrinsic(llvm::Intrinsic::type_checked_load),
728
1
          {VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId});
729
1
      CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);
730
1
      VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0);
731
1
      VirtualFn = Builder.CreateBitCast(VirtualFn, FTy->getPointerTo(),
732
1
                                        "memptr.virtualfn");
733
88
    } else {
734
      // When not doing VFE, emit a normal load, as it allows more
735
      // optimisations than type.checked.load.
736
88
      if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
737
3
        llvm::Value *VFPAddr = Builder.CreateGEP(VTable, VTableOffset);
738
3
        CheckResult = Builder.CreateCall(
739
3
            CGM.getIntrinsic(llvm::Intrinsic::type_test),
740
3
            {Builder.CreateBitCast(VFPAddr, CGF.Int8PtrTy), TypeId});
741
3
      }
742
743
88
      if (CGM.getItaniumVTableContext().isRelativeLayout()) {
744
1
        VirtualFn = CGF.Builder.CreateCall(
745
1
            CGM.getIntrinsic(llvm::Intrinsic::load_relative,
746
1
                             {VTableOffset->getType()}),
747
1
            {VTable, VTableOffset});
748
1
        VirtualFn = CGF.Builder.CreateBitCast(VirtualFn, FTy->getPointerTo());
749
87
      } else {
750
87
        llvm::Value *VFPAddr = CGF.Builder.CreateGEP(VTable, VTableOffset);
751
87
        VFPAddr = CGF.Builder.CreateBitCast(
752
87
            VFPAddr, FTy->getPointerTo()->getPointerTo());
753
87
        VirtualFn = CGF.Builder.CreateAlignedLoad(
754
87
            VFPAddr, CGF.getPointerAlign(), "memptr.virtualfn");
755
87
      }
756
88
    }
757
89
    assert(VirtualFn && "Virtual function pointer not created!");
758
89
    assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || !ShouldEmitWPDInfo ||
759
89
            CheckResult) &&
760
89
           "Check result required but not created!");
761
762
89
    if (ShouldEmitCFICheck) {
763
      // If doing CFI, emit the check.
764
2
      CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc());
765
2
      CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
766
2
      llvm::Constant *StaticData[] = {
767
2
          llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
768
2
          CheckSourceLocation,
769
2
          CheckTypeDesc,
770
2
      };
771
772
2
      if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
773
2
        CGF.EmitTrapCheck(CheckResult);
774
0
      } else {
775
0
        llvm::Value *AllVtables = llvm::MetadataAsValue::get(
776
0
            CGM.getLLVMContext(),
777
0
            llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
778
0
        llvm::Value *ValidVtable = Builder.CreateCall(
779
0
            CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
780
0
        CGF.EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIMFCall),
781
0
                      SanitizerHandler::CFICheckFail, StaticData,
782
0
                      {VTable, ValidVtable});
783
0
      }
784
785
2
      FnVirtual = Builder.GetInsertBlock();
786
2
    }
787
89
  } // End of sanitizer scope
788
789
89
  CGF.EmitBranch(FnEnd);
790
791
  // In the non-virtual path, the function pointer is actually a
792
  // function pointer.
793
89
  CGF.EmitBlock(FnNonVirtual);
794
89
  llvm::Value *NonVirtualFn =
795
89
    Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
796
797
  // Check the function pointer if CFI on member function pointers is enabled.
798
89
  if (ShouldEmitCFICheck) {
799
2
    CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
800
2
    if (RD->hasDefinition()) {
801
1
      CodeGenFunction::SanitizerScope SanScope(&CGF);
802
803
1
      llvm::Constant *StaticData[] = {
804
1
          llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall),
805
1
          CheckSourceLocation,
806
1
          CheckTypeDesc,
807
1
      };
808
809
1
      llvm::Value *Bit = Builder.getFalse();
810
1
      llvm::Value *CastedNonVirtualFn =
811
1
          Builder.CreateBitCast(NonVirtualFn, CGF.Int8PtrTy);
812
2
      for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
813
2
        llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
814
2
            getContext().getMemberPointerType(
815
2
                MPT->getPointeeType(),
816
2
                getContext().getRecordType(Base).getTypePtr()));
817
2
        llvm::Value *TypeId =
818
2
            llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
819
820
2
        llvm::Value *TypeTest =
821
2
            Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
822
2
                               {CastedNonVirtualFn, TypeId});
823
2
        Bit = Builder.CreateOr(Bit, TypeTest);
824
2
      }
825
826
1
      CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall),
827
1
                    SanitizerHandler::CFICheckFail, StaticData,
828
1
                    {CastedNonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});
829
830
1
      FnNonVirtual = Builder.GetInsertBlock();
831
1
    }
832
2
  }
833
834
  // We're done.
835
89
  CGF.EmitBlock(FnEnd);
836
89
  llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(), 2);
837
89
  CalleePtr->addIncoming(VirtualFn, FnVirtual);
838
89
  CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
839
840
89
  CGCallee Callee(FPT, CalleePtr);
841
89
  return Callee;
842
89
}
843
844
/// Compute an l-value by applying the given pointer-to-member to a
845
/// base object.
846
llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
847
    CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
848
57
    const MemberPointerType *MPT) {
849
57
  assert(MemPtr->getType() == CGM.PtrDiffTy);
850
851
57
  CGBuilderTy &Builder = CGF.Builder;
852
853
  // Cast to char*.
854
57
  Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);
855
856
  // Apply the offset, which we assume is non-null.
857
57
  llvm::Value *Addr =
858
57
    Builder.CreateInBoundsGEP(Base.getPointer(), MemPtr, "memptr.offset");
859
860
  // Cast the address to the appropriate pointer type, adopting the
861
  // address space of the base pointer.
862
57
  llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
863
57
                            ->getPointerTo(Base.getAddressSpace());
864
57
  return Builder.CreateBitCast(Addr, PType);
865
57
}
866
867
/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
868
/// conversion.
869
///
870
/// Bitcast conversions are always a no-op under Itanium.
871
///
872
/// Obligatory offset/adjustment diagram:
873
///         <-- offset -->          <-- adjustment -->
874
///   |--------------------------|----------------------|--------------------|
875
///   ^Derived address point     ^Base address point    ^Member address point
876
///
877
/// So when converting a base member pointer to a derived member pointer,
878
/// we add the offset to the adjustment because the address point has
879
/// decreased;  and conversely, when converting a derived MP to a base MP
880
/// we subtract the offset from the adjustment because the address point
881
/// has increased.
882
///
883
/// The standard forbids (at compile time) conversion to and from
884
/// virtual bases, which is why we don't have to consider them here.
885
///
886
/// The standard forbids (at run time) casting a derived MP to a base
887
/// MP when the derived MP does not point to a member of the base.
888
/// This is why -1 is a reasonable choice for null data member
889
/// pointers.
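As a concrete (hypothetical) example of the offset/adjustment rule above, consider a data member pointer converted between a base and a derived class whose Base subobject sits at a nonzero offset:

struct Pad     { long padding; };
struct Base    { int member; };
struct Derived : Pad, Base {};        // Base subobject starts after Pad

// Under Itanium a data member pointer is a ptrdiff_t offset into the object,
// with -1 reserved for null:
int Base::*bp    = &Base::member;     // encoded as 0
int Derived::*dp = bp;                // base-to-derived: the Base subobject
                                      // offset (typically sizeof(Pad)) is
                                      // added to the encoding
// Converting back (derived-to-base, where the language permits it) subtracts
// that same offset again.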
890
llvm::Value *
891
ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
892
                                           const CastExpr *E,
893
54
                                           llvm::Value *src) {
894
54
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
895
54
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
896
54
         E->getCastKind() == CK_ReinterpretMemberPointer);
897
898
  // Under Itanium, reinterprets don't require any additional processing.
899
54
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
900
901
  // Use constant emission if we can.
902
51
  if (isa<llvm::Constant>(src))
903
30
    return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));
904
905
21
  llvm::Constant *adj = getMemberPointerAdjustment(E);
906
21
  if (!adj) return src;
907
908
18
  CGBuilderTy &Builder = CGF.Builder;
909
18
  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
910
911
18
  const MemberPointerType *destTy =
912
18
    E->getType()->castAs<MemberPointerType>();
913
914
  // For member data pointers, this is just a matter of adding the
915
  // offset if the source is non-null.
916
18
  if (destTy->isMemberDataPointer()) {
917
2
    llvm::Value *dst;
918
2
    if (isDerivedToBase)
919
1
      dst = Builder.CreateNSWSub(src, adj, "adj");
920
1
    else
921
1
      dst = Builder.CreateNSWAdd(src, adj, "adj");
922
923
    // Null check.
924
2
    llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
925
2
    llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
926
2
    return Builder.CreateSelect(isNull, src, dst);
927
2
  }
928
929
  // The this-adjustment is left-shifted by 1 on ARM.
930
16
  if (UseARMMethodPtrABI) {
931
8
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
932
8
    offset <<= 1;
933
8
    adj = llvm::ConstantInt::get(adj->getType(), offset);
934
8
  }
935
936
16
  llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
937
16
  llvm::Value *dstAdj;
938
16
  if (isDerivedToBase)
939
8
    dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
940
8
  else
941
8
    dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");
942
943
16
  return Builder.CreateInsertValue(src, dstAdj, 1);
944
16
}
945
946
llvm::Constant *
947
ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
948
71
                                           llvm::Constant *src) {
949
71
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
950
71
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
951
71
         E->getCastKind() == CK_ReinterpretMemberPointer);
952
953
  // Under Itanium, reinterprets don't require any additional processing.
954
71
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
955
956
  // If the adjustment is trivial, we don't need to do anything.
957
38
  llvm::Constant *adj = getMemberPointerAdjustment(E);
958
38
  if (!adj) return src;
959
960
8
  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
961
962
8
  const MemberPointerType *destTy =
963
8
    E->getType()->castAs<MemberPointerType>();
964
965
  // For member data pointers, this is just a matter of adding the
966
  // offset if the source is non-null.
967
8
  if (destTy->isMemberDataPointer()) {
968
    // null maps to null.
969
0
    if (src->isAllOnesValue()) return src;
970
971
0
    if (isDerivedToBase)
972
0
      return llvm::ConstantExpr::getNSWSub(src, adj);
973
0
    else
974
0
      return llvm::ConstantExpr::getNSWAdd(src, adj);
975
8
  }
976
977
  // The this-adjustment is left-shifted by 1 on ARM.
978
8
  if (UseARMMethodPtrABI) {
979
4
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
980
4
    offset <<= 1;
981
4
    adj = llvm::ConstantInt::get(adj->getType(), offset);
982
4
  }
983
984
8
  llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
985
8
  llvm::Constant *dstAdj;
986
8
  if (isDerivedToBase)
987
0
    dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
988
8
  else
989
8
    dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
990
991
8
  return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
992
8
}
993
994
llvm::Constant *
995
79
ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
996
  // Itanium C++ ABI 2.3:
997
  //   A NULL pointer is represented as -1.
998
79
  if (MPT->isMemberDataPointer())
999
54
    return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
1000
1001
25
  llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
1002
25
  llvm::Constant *Values[2] = { Zero, Zero };
1003
25
  return llvm::ConstantStruct::getAnon(Values);
1004
25
}
1005
1006
llvm::Constant *
1007
ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
1008
59
                                     CharUnits offset) {
1009
  // Itanium C++ ABI 2.3:
1010
  //   A pointer to data member is an offset from the base address of
1011
  //   the class object containing it, represented as a ptrdiff_t
1012
59
  return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
1013
59
}
1014
1015
llvm::Constant *
1016
243
ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
1017
243
  return BuildMemberPointer(MD, CharUnits::Zero());
1018
243
}
1019
1020
llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
1021
434
                                                  CharUnits ThisAdjustment) {
1022
434
  assert(MD->isInstance() && "Member function must not be static!");
1023
1024
434
  CodeGenTypes &Types = CGM.getTypes();
1025
1026
  // Get the function pointer (or index if this is a virtual function).
1027
434
  llvm::Constant *MemPtr[2];
1028
434
  if (MD->isVirtual()) {
1029
126
    uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
1030
126
    uint64_t VTableOffset;
1031
126
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1032
      // Multiply by 4-byte relative offsets.
1033
0
      VTableOffset = Index * 4;
1034
126
    } else {
1035
126
      const ASTContext &Context = getContext();
1036
126
      CharUnits PointerWidth = Context.toCharUnitsFromBits(
1037
126
          Context.getTargetInfo().getPointerWidth(0));
1038
126
      VTableOffset = Index * PointerWidth.getQuantity();
1039
126
    }
1040
1041
126
    if (UseARMMethodPtrABI) {
1042
      // ARM C++ ABI 3.2.1:
1043
      //   This ABI specifies that adj contains twice the this
1044
      //   adjustment, plus 1 if the member function is virtual. The
1045
      //   least significant bit of adj then makes exactly the same
1046
      //   discrimination as the least significant bit of ptr does for
1047
      //   Itanium.
1048
50
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
1049
50
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1050
50
                                         2 * ThisAdjustment.getQuantity() + 1);
1051
76
    } else {
1052
      // Itanium C++ ABI 2.3:
1053
      //   For a virtual function, [the pointer field] is 1 plus the
1054
      //   virtual table offset (in bytes) of the function,
1055
      //   represented as a ptrdiff_t.
1056
76
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
1057
76
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1058
76
                                         ThisAdjustment.getQuantity());
1059
76
    }
1060
308
  } else {
1061
308
    const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
1062
308
    llvm::Type *Ty;
1063
    // Check whether the function has a computable LLVM signature.
1064
308
    if (Types.isFuncTypeConvertible(FPT)) {
1065
      // The function has a computable LLVM signature; use the correct type.
1066
306
      Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
1067
2
    } else {
1068
      // Use an arbitrary non-function type to tell GetAddrOfFunction that the
1069
      // function type is incomplete.
1070
2
      Ty = CGM.PtrDiffTy;
1071
2
    }
1072
308
    llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);
1073
1074
308
    MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
1075
308
    MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1076
252
                                       (UseARMMethodPtrABI ? 2 : 1) *
1077
308
                                       ThisAdjustment.getQuantity());
1078
308
  }
1079
1080
434
  return llvm::ConstantStruct::getAnon(MemPtr);
1081
434
}
1082
1083
llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
1084
220
                                                 QualType MPType) {
1085
220
  const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
1086
220
  const ValueDecl *MPD = MP.getMemberPointerDecl();
1087
220
  if (!MPD)
1088
4
    return EmitNullMemberPointer(MPT);
1089
1090
216
  CharUnits ThisAdjustment = getMemberPointerPathAdjustment(MP);
1091
1092
216
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
1093
191
    return BuildMemberPointer(MD, ThisAdjustment);
1094
1095
25
  CharUnits FieldOffset =
1096
25
    getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
1097
25
  return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
1098
25
}
1099
1100
/// The comparison algorithm is pretty easy: the member pointers are
1101
/// the same if they're either bitwise identical *or* both null.
1102
///
1103
/// ARM is different here only because null-ness is more complicated.
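A plain-C++ restatement of the Itanium equality rule that the code below emits as IR (the representation struct and the function name are illustrative only):

#include <cstddef>

struct MemFnPtrRepr { std::ptrdiff_t ptr; std::ptrdiff_t adj; };

// Itanium: two member function pointers are equal when the ptr fields match
// and either both are null (ptr == 0) or the adjustments also match.
inline bool memFnPtrEqualItanium(MemFnPtrRepr L, MemFnPtrRepr R) {
  return L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj);
}

The ARM variant additionally has to check the low bit of the adjustments in the null case, which is exactly the extra (L.adj | R.adj) & 1 test emitted further down.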
1104
llvm::Value *
1105
ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
1106
                                           llvm::Value *L,
1107
                                           llvm::Value *R,
1108
                                           const MemberPointerType *MPT,
1109
12
                                           bool Inequality) {
1110
12
  CGBuilderTy &Builder = CGF.Builder;
1111
1112
12
  llvm::ICmpInst::Predicate Eq;
1113
12
  llvm::Instruction::BinaryOps And, Or;
1114
12
  if (Inequality) {
1115
2
    Eq = llvm::ICmpInst::ICMP_NE;
1116
2
    And = llvm::Instruction::Or;
1117
2
    Or = llvm::Instruction::And;
1118
10
  } else {
1119
10
    Eq = llvm::ICmpInst::ICMP_EQ;
1120
10
    And = llvm::Instruction::And;
1121
10
    Or = llvm::Instruction::Or;
1122
10
  }
1123
1124
  // Member data pointers are easy because there's a unique null
1125
  // value, so it just comes down to bitwise equality.
1126
12
  if (MPT->isMemberDataPointer())
1127
4
    return Builder.CreateICmp(Eq, L, R);
1128
1129
  // For member function pointers, the tautologies are more complex.
1130
  // The Itanium tautology is:
1131
  //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
1132
  // The ARM tautology is:
1133
  //   (L == R) <==> (L.ptr == R.ptr &&
1134
  //                  (L.adj == R.adj ||
1135
  //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
1136
  // The inequality tautologies have exactly the same structure, except
1137
  // applying De Morgan's laws.
1138
1139
8
  llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
1140
8
  llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");
1141
1142
  // This condition tests whether L.ptr == R.ptr.  This must always be
1143
  // true for equality to hold.
1144
8
  llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");
1145
1146
  // This condition, together with the assumption that L.ptr == R.ptr,
1147
  // tests whether the pointers are both null.  ARM imposes an extra
1148
  // condition.
1149
8
  llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
1150
8
  llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");
1151
1152
  // This condition tests whether L.adj == R.adj.  If this isn't
1153
  // true, the pointers are unequal unless they're both null.
1154
8
  llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
1155
8
  llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
1156
8
  llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");
1157
1158
  // Null member function pointers on ARM clear the low bit of Adj,
1159
  // so the zero condition has to check that neither low bit is set.
1160
8
  if (UseARMMethodPtrABI) {
1161
4
    llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);
1162
1163
    // Compute (l.adj | r.adj) & 1 and test it against zero.
1164
4
    llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
1165
4
    llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
1166
4
    llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
1167
4
                                                      "cmp.or.adj");
1168
4
    EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
1169
4
  }
1170
1171
  // Tie together all our conditions.
1172
8
  llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
1173
8
  Result = Builder.CreateBinOp(And, PtrEq, Result,
1174
8
                               Inequality ? "memptr.ne" : "memptr.eq");
1175
8
  return Result;
1176
8
}
1177
1178
llvm::Value *
1179
ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
1180
                                          llvm::Value *MemPtr,
1181
52
                                          const MemberPointerType *MPT) {
1182
52
  CGBuilderTy &Builder = CGF.Builder;
1183
1184
  /// For member data pointers, this is just a check against -1.
1185
52
  if (MPT->isMemberDataPointer()) {
1186
5
    assert(MemPtr->getType() == CGM.PtrDiffTy);
1187
5
    llvm::Value *NegativeOne =
1188
5
      llvm::Constant::getAllOnesValue(MemPtr->getType());
1189
5
    return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
1190
5
  }
1191
1192
  // In Itanium, a member function pointer is not null if 'ptr' is not null.
1193
47
  llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
1194
1195
47
  llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
1196
47
  llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
1197
1198
  // On ARM, a member function pointer is also non-null if the low bit of 'adj'
1199
  // (the virtual bit) is set.
1200
47
  if (UseARMMethodPtrABI) {
1201
20
    llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
1202
20
    llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
1203
20
    llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
1204
20
    llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
1205
20
                                                  "memptr.isvirtual");
1206
20
    Result = Builder.CreateOr(Result, IsVirtual);
1207
20
  }
1208
1209
47
  return Result;
1210
47
}
1211
1212
195k
bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
1213
195k
  const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
1214
195k
  if (!RD)
1215
191k
    return false;
1216
1217
  // If C++ prohibits us from making a copy, return by address.
1218
3.90k
  if (!RD->canPassInRegisters()) {
1219
700
    auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
1220
700
    FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
1221
700
    return true;
1222
700
  }
1223
3.20k
  return false;
1224
3.20k
}
1225
1226
/// The Itanium ABI requires non-zero initialization only for data
1227
/// member pointers, for which '0' is a valid offset.
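For example (illustrative type), a pointer to a class's first data member is encoded as offset 0, which is exactly the bit pattern zero-initialization would produce, so the all-ones value is reserved for null instead:

struct S { int first; };
int S::*valid = &S::first;   // encoded as offset 0 under Itanium
int S::*none  = nullptr;     // encoded as -1, so zero-filled storage is not null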
1228
176
bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
1229
176
  return MPT->isMemberFunctionPointer();
1230
176
}
1231
1232
/// The Itanium ABI always places an offset to the complete object
1233
/// at entry -2 in the vtable.
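For reference, the vtable prefix that this code relies on looks as follows under the classic (non-relative) Itanium layout, with slot indices taken relative to the address point stored in objects:

//   vtable[-2]  offset-to-top: ptrdiff_t added to a subobject pointer to
//                              recover the complete-object address
//   vtable[-1]  RTTI pointer:  the std::type_info for the dynamic type
//   vtable[ 0]  first virtual function slot (the address point)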
1234
void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
1235
                                            const CXXDeleteExpr *DE,
1236
                                            Address Ptr,
1237
                                            QualType ElementType,
1238
31
                                            const CXXDestructorDecl *Dtor) {
1239
31
  bool UseGlobalDelete = DE->isGlobalDelete();
1240
31
  if (UseGlobalDelete) {
1241
    // Derive the complete-object pointer, which is what we need
1242
    // to pass to the deallocation function.
1243
1244
    // Grab the vtable pointer as an intptr_t*.
1245
6
    auto *ClassDecl =
1246
6
        cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
1247
6
    llvm::Value *VTable =
1248
6
        CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);
1249
1250
    // Track back to entry -2 and pull out the offset there.
1251
6
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
1252
6
        VTable, -2, "complete-offset.ptr");
1253
6
    llvm::Value *Offset =
1254
6
      CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
1255
1256
    // Apply the offset.
1257
6
    llvm::Value *CompletePtr =
1258
6
      CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
1259
6
    CompletePtr = CGF.Builder.CreateInBoundsGEP(CompletePtr, Offset);
1260
1261
    // If we're supposed to call the global delete, make sure we do so
1262
    // even if the destructor throws.
1263
6
    CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
1264
6
                                    ElementType);
1265
6
  }
1266
1267
  // FIXME: Provide a source location here even though there's no
1268
  // CXXMemberCallExpr for dtor call.
1269
25
  CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
1270
31
  EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE);
1271
1272
31
  if (UseGlobalDelete)
1273
6
    CGF.PopCleanupBlock();
1274
31
}
1275
1276
55
void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
1277
  // void __cxa_rethrow();
1278
1279
55
  llvm::FunctionType *FTy =
1280
55
    llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
1281
1282
55
  llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
1283
1284
55
  if (isNoReturn)
1285
42
    CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
1286
13
  else
1287
13
    CGF.EmitRuntimeCallOrInvoke(Fn);
1288
55
}
1289
1290
428
static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
1291
  // void *__cxa_allocate_exception(size_t thrown_size);
1292
1293
428
  llvm::FunctionType *FTy =
1294
428
    llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false);
1295
1296
428
  return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
1297
428
}
1298
1299
428
static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
1300
  // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
1301
  //                  void (*dest) (void *));
1302
1303
428
  llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
1304
428
  llvm::FunctionType *FTy =
1305
428
    llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
1306
1307
428
  return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
1308
428
}
1309
1310
428
void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
1311
428
  QualType ThrowType = E->getSubExpr()->getType();
1312
  // Now allocate the exception object.
1313
428
  llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
1314
428
  uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
1315
1316
428
  llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
1317
428
  llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
1318
428
      AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");
1319
1320
428
  CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
1321
428
  CGF.EmitAnyExprToExn(E->getSubExpr(), Address(ExceptionPtr, ExnAlign));
1322
1323
  // Now throw the exception.
1324
428
  llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
1325
428
                                                         /*ForEH=*/true);
1326
1327
  // The address of the destructor.  If the exception type has a
1328
  // trivial destructor (or isn't a record), we just pass null.
1329
428
  llvm::Constant *Dtor = nullptr;
1330
428
  if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
1331
314
    CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
1332
314
    if (!Record->hasTrivialDestructor()) {
1333
298
      CXXDestructorDecl *DtorD = Record->getDestructor();
1334
298
      Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
1335
298
      Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
1336
298
    }
1337
314
  }
1338
428
  if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);
1339
1340
428
  llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
1341
428
  CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
1342
428
}
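As a hedged sketch (illustrative only, using the runtime signatures quoted in the comments above), the code emitted here is roughly equivalent to:
#include <cstddef>
#include <new>
#include <typeinfo>

// Itanium EH runtime entry points, as declared in the comments above.
extern "C" void *__cxa_allocate_exception(std::size_t thrown_size);
extern "C" void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
                            void (*dest)(void *));

struct E { int code; ~E(); };                      // placeholder exception type
static void destroy_E(void *p) { static_cast<E *>(p)->~E(); }

// Roughly what 'throw E{42};' becomes: allocate, construct in place, throw.
void throw_E() {
  void *exn = __cxa_allocate_exception(sizeof(E));
  new (exn) E{42};                                 // EmitAnyExprToExn
  __cxa_throw(exn, const_cast<std::type_info *>(&typeid(E)), &destroy_E);
}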
1343
1344
56
static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
1345
  // void *__dynamic_cast(const void *sub,
1346
  //                      const abi::__class_type_info *src,
1347
  //                      const abi::__class_type_info *dst,
1348
  //                      std::ptrdiff_t src2dst_offset);
1349
1350
56
  llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1351
56
  llvm::Type *PtrDiffTy =
1352
56
    CGF.ConvertType(CGF.getContext().getPointerDiffType());
1353
1354
56
  llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
1355
1356
56
  llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
1357
1358
  // Mark the function as nounwind readonly.
1359
56
  llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
1360
56
                                            llvm::Attribute::ReadOnly };
1361
56
  llvm::AttributeList Attrs = llvm::AttributeList::get(
1362
56
      CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);
1363
1364
56
  return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
1365
56
}
1366
1367
9
static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
1368
  // void __cxa_bad_cast();
1369
9
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1370
9
  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1371
9
}
1372
1373
/// Compute the src2dst_offset hint as described in the
1374
/// Itanium C++ ABI [2.9.7]
1375
static CharUnits computeOffsetHint(ASTContext &Context,
1376
                                   const CXXRecordDecl *Src,
1377
56
                                   const CXXRecordDecl *Dst) {
1378
56
  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1379
56
                     /*DetectVirtual=*/false);
1380
1381
  // If Dst is not derived from Src we can skip the whole computation below and
1382
  // return that Src is not a public base of Dst.  Record all inheritance paths.
1383
56
  if (!Dst->isDerivedFrom(Src, Paths))
1384
5
    return CharUnits::fromQuantity(-2ULL);
1385
1386
51
  unsigned NumPublicPaths = 0;
1387
51
  CharUnits Offset;
1388
1389
  // Now walk all possible inheritance paths.
1390
57
  for (const CXXBasePath &Path : Paths) {
1391
57
    if (Path.Access != AS_public)  // Ignore non-public inheritance.
1392
9
      continue;
1393
1394
48
    ++NumPublicPaths;
1395
1396
75
    for (const CXXBasePathElement &PathElement : Path) {
1397
      // If the path contains a virtual base class we can't give any hint.
1398
      // -1: no hint.
1399
75
      if (PathElement.Base->isVirtual())
1400
9
        return CharUnits::fromQuantity(-1ULL);
1401
1402
66
      if (NumPublicPaths > 1) // Won't use offsets, skip computation.
1403
9
        continue;
1404
1405
      // Accumulate the base class offsets.
1406
57
      const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
1407
57
      Offset += L.getBaseClassOffset(
1408
57
          PathElement.Base->getType()->getAsCXXRecordDecl());
1409
57
    }
1410
48
  }
1411
1412
  // -2: Src is not a public base of Dst.
1413
42
  if (NumPublicPaths == 0)
1414
9
    return CharUnits::fromQuantity(-2ULL);
1415
1416
  // -3: Src is a multiple public base type but never a virtual base type.
1417
33
  if (NumPublicPaths > 1)
1418
0
    return CharUnits::fromQuantity(-3ULL);
1419
1420
  // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
1421
  // Return the offset of Src from the origin of Dst.
1422
33
  return Offset;
1423
33
}
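A worked illustration of the hint values computed above (the classes and the byte offset are hypothetical):
// struct A { virtual ~A(); };
// struct B { virtual ~B(); };
// struct C : public A, public B { };     // B lives at a non-zero offset in C
//
// For a dynamic_cast from B* to C*, the hint is the byte offset of B within C
// (for example 8 on a typical 64-bit target), letting __dynamic_cast take a
// fast path.  The special values are:
//   -1 : a virtual base lies on the inheritance path; no static offset exists
//   -2 : Src is not a public base of Dst at all
//   -3 : Src is a public base of Dst more than once (no unique offset)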
1424
1425
19
static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
1426
  // void __cxa_bad_typeid();
1427
19
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1428
1429
19
  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1430
19
}
1431
1432
bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
1433
28
                                              QualType SrcRecordTy) {
1434
28
  return IsDeref;
1435
28
}
1436
1437
19
void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1438
19
  llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
1439
19
  llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1440
19
  Call->setDoesNotReturn();
1441
19
  CGF.Builder.CreateUnreachable();
1442
19
}
1443
1444
llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
1445
                                       QualType SrcRecordTy,
1446
                                       Address ThisPtr,
1447
28
                                       llvm::Type *StdTypeInfoPtrTy) {
1448
28
  auto *ClassDecl =
1449
28
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
1450
28
  llvm::Value *Value =
1451
28
      CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);
1452
1453
28
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1454
    // Load the type info.
1455
1
    Value = CGF.Builder.CreateBitCast(Value, CGM.Int8PtrTy);
1456
1
    Value = CGF.Builder.CreateCall(
1457
1
        CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
1458
1
        {Value, llvm::ConstantInt::get(CGM.Int32Ty, -4)});
1459
1460
    // Setup to dereference again since this is a proxy we accessed.
1461
1
    Value = CGF.Builder.CreateBitCast(Value, StdTypeInfoPtrTy->getPointerTo());
1462
27
  } else {
1463
    // Load the type info.
1464
27
    Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
1465
27
  }
1466
28
  return CGF.Builder.CreateAlignedLoad(Value, CGF.getPointerAlign());
1467
28
}
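A hedged sketch of the vtable prefix these negative indices walk (conventional, non-relative layout):
//   vtable[-2]  offset-to-top     (ptrdiff_t; read by emitVirtualObjectDelete
//                                  above and EmitDynamicCastToVoid below)
//   vtable[-1]  std::type_info*   (what EmitTypeid just loaded)
//   vtable[ 0]  first virtual function slot   <-- the address point the vptr
//                                                 actually refers to
//
// Under the relative layout, these prefix slots hold 32-bit self-relative
// offsets, which is why the code above switches to llvm.load.relative and
// 4-byte offsets instead of pointer-sized loads.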
1468
1469
bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
1470
59
                                                       QualType SrcRecordTy) {
1471
59
  return SrcIsPtr;
1472
59
}
1473
1474
llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
1475
    CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
1476
56
    QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
1477
56
  llvm::Type *PtrDiffLTy =
1478
56
      CGF.ConvertType(CGF.getContext().getPointerDiffType());
1479
56
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1480
1481
56
  llvm::Value *SrcRTTI =
1482
56
      CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
1483
56
  llvm::Value *DestRTTI =
1484
56
      CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
1485
1486
  // Compute the offset hint.
1487
56
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
1488
56
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
1489
56
  llvm::Value *OffsetHint = llvm::ConstantInt::get(
1490
56
      PtrDiffLTy,
1491
56
      computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());
1492
1493
  // Emit the call to __dynamic_cast.
1494
56
  llvm::Value *Value = ThisAddr.getPointer();
1495
56
  Value = CGF.EmitCastToVoidPtr(Value);
1496
1497
56
  llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
1498
56
  Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
1499
56
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);
1500
1501
  /// C++ [expr.dynamic.cast]p9:
1502
  ///   A failed cast to reference type throws std::bad_cast
1503
56
  if (DestTy->isReferenceType()) {
1504
8
    llvm::BasicBlock *BadCastBlock =
1505
8
        CGF.createBasicBlock("dynamic_cast.bad_cast");
1506
1507
8
    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
1508
8
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);
1509
1510
8
    CGF.EmitBlock(BadCastBlock);
1511
8
    EmitBadCastCall(CGF);
1512
8
  }
1513
1514
56
  return Value;
1515
56
}
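Roughly (a sketch with placeholder names), the emitted call has this shape:
//   void *result = __dynamic_cast(src_ptr,
//                                 /*src*/ &typeid(Src),
//                                 /*dst*/ &typeid(Dst),
//                                 /*src2dst_offset*/ hint); // computeOffsetHint
//
// For the pointer form a null result simply propagates; for the reference
// form, dynamic_cast<Dst&>(obj), the extra branch above calls __cxa_bad_cast()
// when the result is null, per [expr.dynamic.cast]p9.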
1516
1517
llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
1518
                                                  Address ThisAddr,
1519
                                                  QualType SrcRecordTy,
1520
3
                                                  QualType DestTy) {
1521
3
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1522
3
  auto *ClassDecl =
1523
3
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
1524
3
  llvm::Value *OffsetToTop;
1525
3
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1526
    // Get the vtable pointer.
1527
1
    llvm::Value *VTable =
1528
1
        CGF.GetVTablePtr(ThisAddr, CGM.Int32Ty->getPointerTo(), ClassDecl);
1529
1530
    // Get the offset-to-top from the vtable.
1531
1
    OffsetToTop =
1532
1
        CGF.Builder.CreateConstInBoundsGEP1_32(/*Type=*/nullptr, VTable, -2U);
1533
1
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
1534
1
        OffsetToTop, CharUnits::fromQuantity(4), "offset.to.top");
1535
2
  } else {
1536
2
    llvm::Type *PtrDiffLTy =
1537
2
        CGF.ConvertType(CGF.getContext().getPointerDiffType());
1538
1539
    // Get the vtable pointer.
1540
2
    llvm::Value *VTable =
1541
2
        CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(), ClassDecl);
1542
1543
    // Get the offset-to-top from the vtable.
1544
2
    OffsetToTop = CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
1545
2
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
1546
2
        OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
1547
2
  }
1548
  // Finally, add the offset to the pointer.
1549
3
  llvm::Value *Value = ThisAddr.getPointer();
1550
3
  Value = CGF.EmitCastToVoidPtr(Value);
1551
3
  Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);
1552
3
  return CGF.Builder.CreateBitCast(Value, DestLTy);
1553
3
}
1554
1555
9
bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1556
9
  llvm::FunctionCallee Fn = getBadCastFn(CGF);
1557
9
  llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1558
9
  Call->setDoesNotReturn();
1559
9
  CGF.Builder.CreateUnreachable();
1560
9
  return true;
1561
9
}
1562
1563
llvm::Value *
1564
ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
1565
                                         Address This,
1566
                                         const CXXRecordDecl *ClassDecl,
1567
447
                                         const CXXRecordDecl *BaseClassDecl) {
1568
447
  llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
1569
447
  CharUnits VBaseOffsetOffset =
1570
447
      CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
1571
447
                                                               BaseClassDecl);
1572
447
  llvm::Value *VBaseOffsetPtr =
1573
447
    CGF.Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
1574
447
                                   "vbase.offset.ptr");
1575
1576
447
  llvm::Value *VBaseOffset;
1577
447
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1578
1
    VBaseOffsetPtr =
1579
1
        CGF.Builder.CreateBitCast(VBaseOffsetPtr, CGF.Int32Ty->getPointerTo());
1580
1
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
1581
1
        VBaseOffsetPtr, CharUnits::fromQuantity(4), "vbase.offset");
1582
446
  } else {
1583
446
    VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
1584
446
                                               CGM.PtrDiffTy->getPointerTo());
1585
446
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
1586
446
        VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
1587
446
  }
1588
447
  return VBaseOffset;
1589
447
}
1590
1591
52.8k
void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
1592
  // Just make sure we're in sync with TargetCXXABI.
1593
52.8k
  assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
1594
1595
  // The constructor used for constructing this as a base class;
1596
  // ignores virtual bases.
1597
52.8k
  CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
1598
1599
  // The constructor used for constructing this as a complete class;
1600
  // constructs the virtual bases, then calls the base constructor.
1601
52.8k
  if (!D->getParent()->isAbstract()) {
1602
    // We don't need to emit the complete ctor if the class is abstract.
1603
51.9k
    CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
1604
51.9k
  }
1605
52.8k
}
1606
1607
CGCXXABI::AddedStructorArgCounts
1608
ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
1609
268k
                                      SmallVectorImpl<CanQualType> &ArgTys) {
1610
268k
  ASTContext &Context = getContext();
1611
1612
  // All parameters are already in place except VTT, which goes after 'this'.
1613
  // These are Clang types, so we don't need to worry about sret yet.
1614
1615
  // Check if we need to add a VTT parameter (which has type void **).
1616
268k
  if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
1617
108k
                                             : GD.getDtorType() == Dtor_Base) &&
1618
125k
      cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
1619
1.11k
    ArgTys.insert(ArgTys.begin() + 1,
1620
1.11k
                  Context.getPointerType(Context.VoidPtrTy));
1621
1.11k
    return AddedStructorArgCounts::prefix(1);
1622
1.11k
  }
1623
267k
  return AddedStructorArgCounts{};
1624
267k
}
1625
1626
8.45k
void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
1627
  // The destructor used for destructing this as a base class; ignores
1628
  // virtual bases.
1629
8.45k
  CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
1630
1631
  // The destructor used for destructing this as a most-derived class;
1632
  // call the base destructor and then destructs any virtual bases.
1633
8.45k
  CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));
1634
1635
  // The destructor in a virtual table is always a 'deleting'
1636
  // destructor, which calls the complete destructor and then uses the
1637
  // appropriate operator delete.
1638
8.45k
  if (D->isVirtual())
1639
533
    CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
1640
8.45k
}
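For illustration, a hypothetical class and the structor symbols the two hooks above end up requesting (mangled names follow the Itanium scheme):
// struct V { virtual ~V(); };
// struct S : virtual V { S(); ~S(); };
//
// EmitCXXConstructors(S::S) emits:
//   _ZN1SC2Ev   base-object ctor      (skips the virtual base V)
//   _ZN1SC1Ev   complete-object ctor  (constructs V, then the base part)
// EmitCXXDestructors(S::~S) emits:
//   _ZN1SD2Ev   base-object dtor
//   _ZN1SD1Ev   complete-object dtor
//   _ZN1SD0Ev   deleting dtor         (only because ~S() is virtual here)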
1641
1642
void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
1643
                                              QualType &ResTy,
1644
52.4k
                                              FunctionArgList &Params) {
1645
52.4k
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
1646
52.4k
  assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));
1647
1648
  // Check if we need a VTT parameter as well.
1649
52.4k
  if (NeedsVTTParameter(CGF.CurGD)) {
1650
251
    ASTContext &Context = getContext();
1651
1652
    // FIXME: avoid the fake decl
1653
251
    QualType T = Context.getPointerType(Context.VoidPtrTy);
1654
251
    auto *VTTDecl = ImplicitParamDecl::Create(
1655
251
        Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
1656
251
        T, ImplicitParamDecl::CXXVTT);
1657
251
    Params.insert(Params.begin() + 1, VTTDecl);
1658
251
    getStructorImplicitParamDecl(CGF) = VTTDecl;
1659
251
  }
1660
52.4k
}
1661
1662
92.3k
void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
1663
  // Naked functions have no prolog.
1664
92.3k
  if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
1665
1
    return;
1666
1667
  /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
1668
  /// adjustments are required, because they are all handled by thunks.
1669
92.3k
  setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));
1670
1671
  /// Initialize the 'vtt' slot if needed.
1672
92.3k
  if (getStructorImplicitParamDecl(CGF)) {
1673
251
    getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
1674
251
        CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
1675
251
  }
1676
1677
  /// If this is a function that the ABI specifies returns 'this', initialize
1678
  /// the return slot to 'this' at the start of the function.
1679
  ///
1680
  /// Unlike the setting of return types, this is done within the ABI
1681
  /// implementation instead of by clients of CGCXXABI because:
1682
  /// 1) getThisValue is currently protected
1683
  /// 2) in theory, an ABI could implement 'this' returns some other way;
1684
  ///    HasThisReturn only specifies a contract, not the implementation
1685
92.3k
  if (HasThisReturn(CGF.CurGD))
1686
405
    CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
1687
92.3k
}
1688
1689
CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
1690
    CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
1691
48.6k
    bool ForVirtualBase, bool Delegating) {
1692
48.6k
  if (!NeedsVTTParameter(GlobalDecl(D, Type)))
1693
48.4k
    return AddedStructorArgs{};
1694
1695
  // Insert the implicit 'vtt' argument as the second argument.
1696
128
  llvm::Value *VTT =
1697
128
      CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
1698
128
  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1699
128
  return AddedStructorArgs::prefix({{VTT, VTTTy}});
1700
128
}
1701
1702
llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam(
1703
    CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
1704
26.1k
    bool ForVirtualBase, bool Delegating) {
1705
26.1k
  GlobalDecl GD(DD, Type);
1706
26.1k
  return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
1707
26.1k
}
1708
1709
void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
1710
                                       const CXXDestructorDecl *DD,
1711
                                       CXXDtorType Type, bool ForVirtualBase,
1712
                                       bool Delegating, Address This,
1713
26.1k
                                       QualType ThisTy) {
1714
26.1k
  GlobalDecl GD(DD, Type);
1715
26.1k
  llvm::Value *VTT =
1716
26.1k
      getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
1717
26.1k
  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1718
1719
26.1k
  CGCallee Callee;
1720
26.1k
  if (getContext().getLangOpts().AppleKext &&
1721
13
      Type != Dtor_Base && 
DD->isVirtual()7
)
1722
7
    Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
1723
26.1k
  else
1724
26.1k
    Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);
1725
1726
26.1k
  CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy,
1727
26.1k
                            nullptr);
1728
26.1k
}
1729
1730
void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
1731
2.36k
                                          const CXXRecordDecl *RD) {
1732
2.36k
  llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
1733
2.36k
  if (VTable->hasInitializer())
1734
749
    return;
1735
1736
1.61k
  ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
1737
1.61k
  const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
1738
1.61k
  llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
1739
1.61k
  llvm::Constant *RTTI =
1740
1.61k
      CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));
1741
1742
  // Create and set the initializer.
1743
1.61k
  ConstantInitBuilder builder(CGM);
1744
1.61k
  auto components = builder.beginStruct();
1745
1.61k
  CGVT.createVTableInitializer(components, VTLayout, RTTI,
1746
1.61k
                               llvm::GlobalValue::isLocalLinkage(Linkage));
1747
1.61k
  components.finishAndSetAsInitializer(VTable);
1748
1749
  // Set the correct linkage.
1750
1.61k
  VTable->setLinkage(Linkage);
1751
1752
1.61k
  if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
1753
355
    VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));
1754
1755
  // Set the right visibility.
1756
1.61k
  CGM.setGVProperties(VTable, RD);
1757
1758
  // If this is the magic class __cxxabiv1::__fundamental_type_info,
1759
  // we will emit the typeinfo for the fundamental types. This is the
1760
  // same behaviour as GCC.
1761
1.61k
  const DeclContext *DC = RD->getDeclContext();
1762
1.61k
  if (RD->getIdentifier() &&
1763
1.61k
      RD->getIdentifier()->isStr("__fundamental_type_info") &&
1764
4
      isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
1765
4
      cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
1766
4
      DC->getParent()->isTranslationUnit())
1767
4
    EmitFundamentalRTTIDescriptors(RD);
1768
1769
1.61k
  if (!VTable->isDeclarationForLinker())
1770
1.48k
    CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
1771
1772
1.61k
  if (VTContext.isRelativeLayout() && !VTable->isDSOLocal())
1773
42
    CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
1774
1.61k
}
1775
1776
bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
1777
2.31k
    CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
1778
2.31k
  if (Vptr.NearestVBase == nullptr)
1779
2.09k
    return false;
1780
225
  return NeedsVTTParameter(CGF.CurGD);
1781
225
}
1782
1783
llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
1784
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1785
2.31k
    const CXXRecordDecl *NearestVBase) {
1786
1787
2.31k
  if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1788
658
      NeedsVTTParameter(CGF.CurGD)) {
1789
277
    return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
1790
277
                                                  NearestVBase);
1791
277
  }
1792
2.04k
  return getVTableAddressPoint(Base, VTableClass);
1793
2.04k
}
1794
1795
llvm::Constant *
1796
ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
1797
2.34k
                                     const CXXRecordDecl *VTableClass) {
1798
2.34k
  llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());
1799
1800
  // Find the appropriate vtable within the vtable group, and the address point
1801
  // within that vtable.
1802
2.34k
  VTableLayout::AddressPointLocation AddressPoint =
1803
2.34k
      CGM.getItaniumVTableContext()
1804
2.34k
          .getVTableLayout(VTableClass)
1805
2.34k
          .getAddressPoint(Base);
1806
2.34k
  llvm::Value *Indices[] = {
1807
2.34k
    llvm::ConstantInt::get(CGM.Int32Ty, 0),
1808
2.34k
    llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
1809
2.34k
    llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
1810
2.34k
  };
1811
1812
2.34k
  return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
1813
2.34k
                                              Indices, /*InBounds=*/true,
1814
2.34k
                                              /*InRangeIndex=*/1);
1815
2.34k
}
1816
1817
llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
1818
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1819
277
    const CXXRecordDecl *NearestVBase) {
1820
277
  assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1821
277
         NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");
1822
1823
  // Get the secondary vpointer index.
1824
277
  uint64_t VirtualPointerIndex =
1825
277
      CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
1826
1827
  /// Load the VTT.
1828
277
  llvm::Value *VTT = CGF.LoadCXXVTT();
1829
277
  if (VirtualPointerIndex)
1830
94
    VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);
1831
1832
  // And load the address point from the VTT.
1833
277
  return CGF.Builder.CreateAlignedLoad(VTT, CGF.getPointerAlign());
1834
277
}
1835
1836
llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
1837
237
    BaseSubobject Base, const CXXRecordDecl *VTableClass) {
1838
237
  return getVTableAddressPoint(Base, VTableClass);
1839
237
}
1840
1841
llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
1842
5.67k
                                                     CharUnits VPtrOffset) {
1843
5.67k
  assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");
1844
1845
5.67k
  llvm::GlobalVariable *&VTable = VTables[RD];
1846
5.67k
  if (VTable)
1847
3.52k
    return VTable;
1848
1849
  // Queue up this vtable for possible deferred emission.
1850
2.15k
  CGM.addDeferredVTable(RD);
1851
1852
2.15k
  SmallString<256> Name;
1853
2.15k
  llvm::raw_svector_ostream Out(Name);
1854
2.15k
  getMangleContext().mangleCXXVTable(RD, Out);
1855
1856
2.15k
  const VTableLayout &VTLayout =
1857
2.15k
      CGM.getItaniumVTableContext().getVTableLayout(RD);
1858
2.15k
  llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);
1859
1860
  // Use pointer alignment for the vtable. Otherwise we would align them based
1861
  // on the size of the initializer which doesn't make sense as only single
1862
  // values are read.
1863
2.15k
  unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
1864
46
                        ? 32
1865
2.10k
                        : CGM.getTarget().getPointerAlign(0);
1866
1867
2.15k
  VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
1868
2.15k
      Name, VTableType, llvm::GlobalValue::ExternalLinkage,
1869
2.15k
      getContext().toCharUnitsFromBits(PAlign).getQuantity());
1870
2.15k
  VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1871
1872
2.15k
  CGM.setGVProperties(VTable, RD);
1873
1874
2.15k
  return VTable;
1875
2.15k
}
1876
1877
CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
1878
                                                  GlobalDecl GD,
1879
                                                  Address This,
1880
                                                  llvm::Type *Ty,
1881
789
                                                  SourceLocation Loc) {
1882
789
  auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
1883
789
  llvm::Value *VTable = CGF.GetVTablePtr(
1884
789
      This, Ty->getPointerTo()->getPointerTo(), MethodDecl->getParent());
1885
1886
789
  uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
1887
789
  llvm::Value *VFunc;
1888
789
  if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
1889
8
    VFunc = CGF.EmitVTableTypeCheckedLoad(
1890
8
        MethodDecl->getParent(), VTable,
1891
8
        VTableIndex * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
1892
781
  } else {
1893
781
    CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);
1894
1895
781
    llvm::Value *VFuncLoad;
1896
781
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1897
17
      VTable = CGF.Builder.CreateBitCast(VTable, CGM.Int8PtrTy);
1898
17
      llvm::Value *Load = CGF.Builder.CreateCall(
1899
17
          CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
1900
17
          {VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
1901
17
      VFuncLoad = CGF.Builder.CreateBitCast(Load, Ty->getPointerTo());
1902
764
    } else {
1903
764
      VTable =
1904
764
          CGF.Builder.CreateBitCast(VTable, Ty->getPointerTo()->getPointerTo());
1905
764
      llvm::Value *VTableSlotPtr =
1906
764
          CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
1907
764
      VFuncLoad =
1908
764
          CGF.Builder.CreateAlignedLoad(VTableSlotPtr, CGF.getPointerAlign());
1909
764
    }
1910
1911
    // Add !invariant.load md to virtual function load to indicate that
1912
    // function didn't change inside vtable.
1913
    // It's safe to add it without -fstrict-vtable-pointers, but it would not
1914
    // help in devirtualization because it will only matter if we will have 2
1915
    // the same virtual function loads from the same vtable load, which won't
1916
    // happen without enabled devirtualization with -fstrict-vtable-pointers.
1917
781
    if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1918
85
        CGM.getCodeGenOpts().StrictVTablePointers) {
1919
44
      if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(VFuncLoad)) {
1920
44
        VFuncLoadInstr->setMetadata(
1921
44
            llvm::LLVMContext::MD_invariant_load,
1922
44
            llvm::MDNode::get(CGM.getLLVMContext(),
1923
44
                              llvm::ArrayRef<llvm::Metadata *>()));
1924
44
      }
1925
44
    }
1926
781
    VFunc = VFuncLoad;
1927
781
  }
1928
1929
789
  CGCallee Callee(GD, VFunc);
1930
789
  return Callee;
1931
789
}
1932
1933
llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
1934
    CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
1935
39
    Address This, DeleteOrMemberCallExpr E) {
1936
39
  auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
1937
39
  auto *D = E.dyn_cast<const CXXDeleteExpr *>();
1938
39
  assert((CE != nullptr) ^ (D != nullptr));
1939
39
  assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
1940
39
  assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
1941
1942
39
  GlobalDecl GD(Dtor, DtorType);
1943
39
  const CGFunctionInfo *FInfo =
1944
39
      &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
1945
39
  llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
1946
39
  CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);
1947
1948
39
  QualType ThisTy;
1949
39
  if (CE) {
1950
8
    ThisTy = CE->getObjectType();
1951
31
  } else {
1952
31
    ThisTy = D->getDestroyedType();
1953
31
  }
1954
1955
39
  CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, nullptr,
1956
39
                            QualType(), nullptr);
1957
39
  return nullptr;
1958
39
}
1959
1960
363
void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
1961
363
  CodeGenVTables &VTables = CGM.getVTables();
1962
363
  llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
1963
363
  VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
1964
363
}
1965
1966
bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
1967
430
    const CXXRecordDecl *RD) const {
1968
  // We don't emit available_externally vtables if we are in -fapple-kext mode
1969
  // because kext mode does not permit devirtualization.
1970
430
  if (CGM.getLangOpts().AppleKext)
1971
0
    return false;
1972
1973
  // If the vtable is hidden then it is not safe to emit an available_externally
1974
  // copy of vtable.
1975
430
  if (isVTableHidden(RD))
1976
19
    return false;
1977
1978
411
  if (CGM.getCodeGenOpts().ForceEmitVTables)
1979
52
    return true;
1980
1981
  // If we don't have any not emitted inline virtual function then we are safe
1982
  // to emit an available_externally copy of vtable.
1983
  // FIXME we can still emit a copy of the vtable if we
1984
  // can emit definition of the inline functions.
1985
359
  if (hasAnyUnusedVirtualInlineFunction(RD))
1986
149
    return false;
1987
1988
  // For a class with virtual bases, we must also be able to speculatively
1989
  // emit the VTT, because CodeGen doesn't have separate notions of "can emit
1990
  // the vtable" and "can emit the VTT". For a base subobject, this means we
1991
  // need to be able to emit non-virtual base vtables.
1992
210
  if (RD->getNumVBases()) {
1993
40
    for (const auto &B : RD->bases()) {
1994
40
      auto *BRD = B.getType()->getAsCXXRecordDecl();
1995
40
      assert(BRD && "no class for base specifier");
1996
40
      if (B.isVirtual() || !BRD->isDynamicClass())
1997
19
        continue;
1998
21
      if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
1999
8
        return false;
2000
21
    }
2001
35
  }
2002
2003
202
  return true;
2004
210
}
2005
2006
397
bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
2007
397
  if (!canSpeculativelyEmitVTableAsBaseClass(RD))
2008
168
    return false;
2009
2010
  // For a complete-object vtable (or more specifically, for the VTT), we need
2011
  // to be able to speculatively emit the vtables of all dynamic virtual bases.
2012
229
  for (const auto &B : RD->vbases()) {
2013
18
    auto *BRD = B.getType()->getAsCXXRecordDecl();
2014
18
    assert(BRD && "no class for base specifier");
2015
18
    if (!BRD->isDynamicClass())
2016
6
      continue;
2017
12
    if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2018
0
      return false;
2019
12
  }
2020
2021
229
  return true;
2022
229
}
2023
static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
2024
                                          Address InitialPtr,
2025
                                          int64_t NonVirtualAdjustment,
2026
                                          int64_t VirtualAdjustment,
2027
366
                                          bool IsReturnAdjustment) {
2028
366
  if (!NonVirtualAdjustment && !VirtualAdjustment)
2029
13
    return InitialPtr.getPointer();
2030
2031
353
  Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);
2032
2033
  // In a base-to-derived cast, the non-virtual adjustment is applied first.
2034
353
  if (NonVirtualAdjustment && !IsReturnAdjustment) {
2035
147
    V = CGF.Builder.CreateConstInBoundsByteGEP(V,
2036
147
                              CharUnits::fromQuantity(NonVirtualAdjustment));
2037
147
  }
2038
2039
  // Perform the virtual adjustment if we have one.
2040
353
  llvm::Value *ResultPtr;
2041
353
  if (VirtualAdjustment) {
2042
209
    Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
2043
209
    llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
2044
2045
209
    llvm::Value *Offset;
2046
209
    llvm::Value *OffsetPtr =
2047
209
        CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);
2048
209
    if (CGF.CGM.getItaniumVTableContext().isRelativeLayout()) {
2049
      // Load the adjustment offset from the vtable as a 32-bit int.
2050
2
      OffsetPtr =
2051
2
          CGF.Builder.CreateBitCast(OffsetPtr, CGF.Int32Ty->getPointerTo());
2052
2
      Offset =
2053
2
          CGF.Builder.CreateAlignedLoad(OffsetPtr, CharUnits::fromQuantity(4));
2054
207
    } else {
2055
207
      llvm::Type *PtrDiffTy =
2056
207
          CGF.ConvertType(CGF.getContext().getPointerDiffType());
2057
2058
207
      OffsetPtr =
2059
207
          CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
2060
2061
      // Load the adjustment offset from the vtable.
2062
207
      Offset = CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
2063
207
    }
2064
    // Adjust our pointer.
2065
209
    ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getPointer(), Offset);
2066
144
  } else {
2067
144
    ResultPtr = V.getPointer();
2068
144
  }
2069
2070
  // In a derived-to-base conversion, the non-virtual adjustment is
2071
  // applied second.
2072
353
  if (NonVirtualAdjustment && IsReturnAdjustment) {
2073
11
    ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(ResultPtr,
2074
11
                                                       NonVirtualAdjustment);
2075
11
  }
2076
2077
  // Cast back to the original type.
2078
353
  return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
2079
353
}
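For illustration (hypothetical classes), the kind of adjustment this helper performs for thunks:
// struct A { virtual void f(); int a; };
// struct B { virtual void g(); int b; };
// struct C : A, B { void g() override; };
//
// Calling g() through a B* that actually points into a C reaches a thunk for
// C::g(); its prologue applies a 'this' adjustment with a constant non-virtual
// delta (roughly minus the offset of B within C) and no virtual part, i.e.
// NonVirtualAdjustment != 0, VirtualAdjustment == 0, IsReturnAdjustment false.
// Covariant-return thunks use the IsReturnAdjustment path instead, where the
// non-virtual delta is applied after any vbase-offset load, as coded above.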
2080
2081
llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
2082
                                                  Address This,
2083
337
                                                  const ThisAdjustment &TA) {
2084
337
  return performTypeAdjustment(CGF, This, TA.NonVirtual,
2085
337
                               TA.Virtual.Itanium.VCallOffsetOffset,
2086
337
                               /*IsReturnAdjustment=*/false);
2087
337
}
2088
2089
llvm::Value *
2090
ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
2091
29
                                       const ReturnAdjustment &RA) {
2092
29
  return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
2093
29
                               RA.Virtual.Itanium.VBaseOffsetOffset,
2094
29
                               /*IsReturnAdjustment=*/true);
2095
29
}
2096
2097
void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
2098
6
                                    RValue RV, QualType ResultType) {
2099
6
  if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
2100
0
    return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
2101
2102
  // Destructor thunks in the ARM ABI have indeterminate results.
2103
6
  llvm::Type *T = CGF.ReturnValue.getElementType();
2104
6
  RValue Undef = RValue::get(llvm::UndefValue::get(T));
2105
6
  return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
2106
6
}
2107
2108
/************************** Array allocation cookies **************************/
2109
2110
168
CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2111
  // The array cookie is a size_t; pad that up to the element alignment.
2112
  // The cookie is actually right-justified in that space.
2113
168
  return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
2114
168
                  CGM.getContext().getTypeAlignInChars(elementType));
2115
168
}
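A worked example of the padding rule above (sizes assume a typical 64-bit target, so treat the numbers as illustrative):
// struct alignas(16) Wide { char bytes[16]; ~Wide(); };  // requires a cookie
//
// getArrayCookieSizeImpl(Wide) = max(sizeof(size_t) = 8, alignof(Wide) = 16)
//                              = 16 bytes.
// 'new Wide[n]' therefore allocates 16 + n * sizeof(Wide) bytes; the element
// count n is stored right-justified, in the last 8 bytes of the cookie
// (CookieOffset = CookieSize - SizeSize in InitializeArrayCookie below), so
// the array data that follows stays 16-byte aligned.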
2116
2117
Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2118
                                             Address NewPtr,
2119
                                             llvm::Value *NumElements,
2120
                                             const CXXNewExpr *expr,
2121
41
                                             QualType ElementType) {
2122
41
  assert(requiresArrayCookie(expr));
2123
2124
41
  unsigned AS = NewPtr.getAddressSpace();
2125
2126
41
  ASTContext &Ctx = getContext();
2127
41
  CharUnits SizeSize = CGF.getSizeSize();
2128
2129
  // The size of the cookie.
2130
41
  CharUnits CookieSize =
2131
41
    std::max(SizeSize, Ctx.getTypeAlignInChars(ElementType));
2132
41
  assert(CookieSize == getArrayCookieSizeImpl(ElementType));
2133
2134
  // Compute an offset to the cookie.
2135
41
  Address CookiePtr = NewPtr;
2136
41
  CharUnits CookieOffset = CookieSize - SizeSize;
2137
41
  if (!CookieOffset.isZero())
2138
5
    CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);
2139
2140
  // Write the number of elements into the appropriate slot.
2141
41
  Address NumElementsPtr =
2142
41
      CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
2143
41
  llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);
2144
2145
  // Handle the array cookie specially in ASan.
2146
41
  if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
2147
8
      (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
2148
6
       
       CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
2149
    // The store to the CookiePtr does not need to be instrumented.
2150
6
    CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
2151
6
    llvm::FunctionType *FTy =
2152
6
        llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
2153
6
    llvm::FunctionCallee F =
2154
6
        CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
2155
6
    CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
2156
6
  }
2157
2158
  // Finally, compute a pointer to the actual data buffer by skipping
2159
  // over the cookie completely.
2160
41
  return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
2161
41
}
2162
2163
llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2164
                                                Address allocPtr,
2165
45
                                                CharUnits cookieSize) {
2166
  // The element size is right-justified in the cookie.
2167
45
  Address numElementsPtr = allocPtr;
2168
45
  CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
2169
45
  if (!numElementsOffset.isZero())
2170
5
    numElementsPtr =
2171
5
      CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);
2172
2173
45
  unsigned AS = allocPtr.getAddressSpace();
2174
45
  numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
2175
45
  if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
2176
43
    return CGF.Builder.CreateLoad(numElementsPtr);
2177
  // In asan mode emit a function call instead of a regular load and let the
2178
  // run-time deal with it: if the shadow is properly poisoned return the
2179
  // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
2180
  // We can't simply ignore this load using nosanitize metadata because
2181
  // the metadata may be lost.
2182
2
  llvm::FunctionType *FTy =
2183
2
      llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
2184
2
  llvm::FunctionCallee F =
2185
2
      CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
2186
2
  return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
2187
2
}
2188
2189
64
CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2190
  // ARM says that the cookie is always:
2191
  //   struct array_cookie {
2192
  //     std::size_t element_size; // element_size != 0
2193
  //     std::size_t element_count;
2194
  //   };
2195
  // But the base ABI doesn't give anything an alignment greater than
2196
  // 8, so we can dismiss this as typical ABI-author blindness to
2197
  // actual language complexity and round up to the element alignment.
2198
64
  return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
2199
64
                  CGM.getContext().getTypeAlignInChars(elementType));
2200
64
}
2201
2202
Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2203
                                         Address newPtr,
2204
                                         llvm::Value *numElements,
2205
                                         const CXXNewExpr *expr,
2206
18
                                         QualType elementType) {
2207
18
  assert(requiresArrayCookie(expr));
2208
2209
  // The cookie is always at the start of the buffer.
2210
18
  Address cookie = newPtr;
2211
2212
  // The first element is the element size.
2213
18
  cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
2214
18
  llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
2215
18
                 getContext().getTypeSizeInChars(elementType).getQuantity());
2216
18
  CGF.Builder.CreateStore(elementSize, cookie);
2217
2218
  // The second element is the element count.
2219
18
  cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1);
2220
18
  CGF.Builder.CreateStore(numElements, cookie);
2221
2222
  // Finally, compute a pointer to the actual data buffer by skipping
2223
  // over the cookie completely.
2224
18
  CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
2225
18
  return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
2226
18
}
2227
2228
llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2229
                                            Address allocPtr,
2230
10
                                            CharUnits cookieSize) {
2231
  // The number of elements is at offset sizeof(size_t) relative to
2232
  // the allocated pointer.
2233
10
  Address numElementsPtr
2234
10
    = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
2235
2236
10
  numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
2237
10
  return CGF.Builder.CreateLoad(numElementsPtr);
2238
10
}
2239
2240
/*********************** Static local initialization **************************/
2241
2242
static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
2243
170
                                              llvm::PointerType *GuardPtrTy) {
2244
  // int __cxa_guard_acquire(__guard *guard_object);
2245
170
  llvm::FunctionType *FTy =
2246
170
    llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
2247
170
                            GuardPtrTy, /*isVarArg=*/false);
2248
170
  return CGM.CreateRuntimeFunction(
2249
170
      FTy, "__cxa_guard_acquire",
2250
170
      llvm::AttributeList::get(CGM.getLLVMContext(),
2251
170
                               llvm::AttributeList::FunctionIndex,
2252
170
                               llvm::Attribute::NoUnwind));
2253
170
}
2254
2255
static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
2256
170
                                              llvm::PointerType *GuardPtrTy) {
2257
  // void __cxa_guard_release(__guard *guard_object);
2258
170
  llvm::FunctionType *FTy =
2259
170
    llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2260
170
  return CGM.CreateRuntimeFunction(
2261
170
      FTy, "__cxa_guard_release",
2262
170
      llvm::AttributeList::get(CGM.getLLVMContext(),
2263
170
                               llvm::AttributeList::FunctionIndex,
2264
170
                               llvm::Attribute::NoUnwind));
2265
170
}
2266
2267
static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
2268
34
                                            llvm::PointerType *GuardPtrTy) {
2269
  // void __cxa_guard_abort(__guard *guard_object);
2270
34
  llvm::FunctionType *FTy =
2271
34
    llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2272
34
  return CGM.CreateRuntimeFunction(
2273
34
      FTy, "__cxa_guard_abort",
2274
34
      llvm::AttributeList::get(CGM.getLLVMContext(),
2275
34
                               llvm::AttributeList::FunctionIndex,
2276
34
                               llvm::Attribute::NoUnwind));
2277
34
}
2278
2279
namespace {
2280
  struct CallGuardAbort final : EHScopeStack::Cleanup {
2281
    llvm::GlobalVariable *Guard;
2282
170
    CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
2283
2284
34
    void Emit(CodeGenFunction &CGF, Flags flags) override {
2285
34
      CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
2286
34
                                  Guard);
2287
34
    }
2288
  };
2289
}
2290
2291
/// The ARM code here follows the Itanium code closely enough that we
2292
/// just special-case it at particular places.
2293
void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
2294
                                    const VarDecl &D,
2295
                                    llvm::GlobalVariable *var,
2296
7.28k
                                    bool shouldPerformInit) {
2297
7.28k
  CGBuilderTy &Builder = CGF.Builder;
2298
2299
  // Inline variables that weren't instantiated from variable templates have
2300
  // partially-ordered initialization within their translation unit.
2301
7.28k
  bool NonTemplateInline =
2302
7.28k
      D.isInline() &&
2303
52
      !isTemplateInstantiation(D.getTemplateSpecializationKind());
2304
2305
  // We only need to use thread-safe statics for local non-TLS variables and
2306
  // inline variables; other global initialization is always single-threaded
2307
  // or (through lazy dynamic loading in multiple threads) unsequenced.
2308
7.28k
  bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
2309
399
                    (D.isLocalVarDecl() || NonTemplateInline) &&
2310
237
                    !D.getTLSKind();
2311
2312
  // If we have a global variable with internal linkage and thread-safe statics
2313
  // are disabled, we can just let the guard variable be of type i8.
2314
7.28k
  bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();
2315
2316
7.28k
  llvm::IntegerType *guardTy;
2317
7.28k
  CharUnits guardAlignment;
2318
7.28k
  if (useInt8GuardVariable) {
2319
6.93k
    guardTy = CGF.Int8Ty;
2320
6.93k
    guardAlignment = CharUnits::One();
2321
345
  } else {
2322
    // Guard variables are 64 bits in the generic ABI and size width on ARM
2323
    // (i.e. 32-bit on AArch32, 64-bit on AArch64).
2324
345
    if (UseARMGuardVarABI) {
2325
13
      guardTy = CGF.SizeTy;
2326
13
      guardAlignment = CGF.getSizeAlign();
2327
332
    } else {
2328
332
      guardTy = CGF.Int64Ty;
2329
332
      guardAlignment = CharUnits::fromQuantity(
2330
332
                             CGM.getDataLayout().getABITypeAlignment(guardTy));
2331
332
    }
2332
345
  }
2333
7.28k
  llvm::PointerType *guardPtrTy = guardTy->getPointerTo();
2334
2335
  // Create the guard variable if we don't already have it (as we
2336
  // might if we're double-emitting this function body).
2337
7.28k
  llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
2338
7.28k
  if (!guard) {
2339
    // Mangle the name for the guard.
2340
7.28k
    SmallString<256> guardName;
2341
7.28k
    {
2342
7.28k
      llvm::raw_svector_ostream out(guardName);
2343
7.28k
      getMangleContext().mangleStaticGuardVariable(&D, out);
2344
7.28k
    }
2345
2346
    // Create the guard variable with a zero-initializer.
2347
    // Just absorb linkage and visibility from the guarded variable.
2348
7.28k
    guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
2349
7.28k
                                     false, var->getLinkage(),
2350
7.28k
                                     llvm::ConstantInt::get(guardTy, 0),
2351
7.28k
                                     guardName.str());
2352
7.28k
    guard->setDSOLocal(var->isDSOLocal());
2353
7.28k
    guard->setVisibility(var->getVisibility());
2354
    // If the variable is thread-local, so is its guard variable.
2355
7.28k
    guard->setThreadLocalMode(var->getThreadLocalMode());
2356
7.28k
    guard->setAlignment(guardAlignment.getAsAlign());
2357
2358
    // The ABI says: "It is suggested that it be emitted in the same COMDAT
2359
    // group as the associated data object." In practice, this doesn't work for
2360
    // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
2361
7.28k
    llvm::Comdat *C = var->getComdat();
2362
7.28k
    if (!D.isLocalVarDecl() && C &&
2363
134
        (CGM.getTarget().getTriple().isOSBinFormatELF() ||
2364
134
         
         CGM.getTarget().getTriple().isOSBinFormatWasm())) {
2365
134
      guard->setComdat(C);
2366
      // An inline variable's guard function is run from the per-TU
2367
      // initialization function, not via a dedicated global ctor function, so
2368
      // we can't put it in a comdat.
2369
134
      if (!NonTemplateInline)
2370
125
        CGF.CurFn->setComdat(C);
2371
7.14k
    } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
2372
31
      guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
2373
31
    }
2374
2375
7.28k
    CGM.setStaticLocalDeclGuardAddress(&D, guard);
2376
7.28k
  }
2377
2378
7.28k
  Address guardAddr = Address(guard, guardAlignment);
2379
2380
  // Test whether the variable has completed initialization.
2381
  //
2382
  // Itanium C++ ABI 3.3.2:
2383
  //   The following is pseudo-code showing how these functions can be used:
2384
  //     if (obj_guard.first_byte == 0) {
2385
  //       if ( __cxa_guard_acquire (&obj_guard) ) {
2386
  //         try {
2387
  //           ... initialize the object ...;
2388
  //         } catch (...) {
2389
  //            __cxa_guard_abort (&obj_guard);
2390
  //            throw;
2391
  //         }
2392
  //         ... queue object destructor with __cxa_atexit() ...;
2393
  //         __cxa_guard_release (&obj_guard);
2394
  //       }
2395
  //     }
2396
2397
  // Load the first byte of the guard variable.
2398
7.28k
  llvm::LoadInst *LI =
2399
7.28k
      Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
2400
2401
  // Itanium ABI:
2402
  //   An implementation supporting thread-safety on multiprocessor
2403
  //   systems must also guarantee that references to the initialized
2404
  //   object do not occur before the load of the initialization flag.
2405
  //
2406
  // In LLVM, we do this by marking the load Acquire.
2407
7.28k
  if (threadsafe)
2408
170
    LI->setAtomic(llvm::AtomicOrdering::Acquire);
2409
2410
  // For ARM, we should only check the first bit, rather than the entire byte:
2411
  //
2412
  // ARM C++ ABI 3.2.3.1:
2413
  //   To support the potential use of initialization guard variables
2414
  //   as semaphores that are the target of ARM SWP and LDREX/STREX
2415
  //   synchronizing instructions we define a static initialization
2416
  //   guard variable to be a 4-byte aligned, 4-byte word with the
2417
  //   following inline access protocol.
2418
  //     #define INITIALIZED 1
2419
  //     if ((obj_guard & INITIALIZED) != INITIALIZED) {
2420
  //       if (__cxa_guard_acquire(&obj_guard))
2421
  //         ...
2422
  //     }
2423
  //
2424
  // and similarly for ARM64:
2425
  //
2426
  // ARM64 C++ ABI 3.2.2:
2427
  //   This ABI instead only specifies the value bit 0 of the static guard
2428
  //   variable; all other bits are platform defined. Bit 0 shall be 0 when the
2429
  //   variable is not initialized and 1 when it is.
2430
7.28k
  llvm::Value *V =
2431
7.28k
      (UseARMGuardVarABI && !useInt8GuardVariable)
2432
13
          ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
2433
7.27k
          : LI;
2434
7.28k
  llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");
2435
2436
7.28k
  llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
2437
7.28k
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
2438
2439
  // Check if the first byte of the guard variable is zero.
2440
7.28k
  CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
2441
7.28k
                               CodeGenFunction::GuardKind::VariableGuard, &D);
2442
2443
7.28k
  CGF.EmitBlock(InitCheckBlock);
2444
2445
  // Variables used when coping with thread-safe statics and exceptions.
2446
7.28k
  if (threadsafe) {
2447
    // Call __cxa_guard_acquire.
2448
170
    llvm::Value *V
2449
170
      = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);
2450
2451
170
    llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
2452
2453
170
    Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
2454
170
                         InitBlock, EndBlock);
2455
2456
    // Call __cxa_guard_abort along the exceptional edge.
2457
170
    CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);
2458
2459
170
    CGF.EmitBlock(InitBlock);
2460
170
  }
2461
2462
  // Emit the initializer and add a global destructor if appropriate.
2463
7.28k
  CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);
2464
2465
7.28k
  if (threadsafe) {
2466
    // Pop the guard-abort cleanup if we pushed one.
2467
170
    CGF.PopCleanupBlock();
2468
2469
    // Call __cxa_guard_release.  This cannot throw.
2470
170
    CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
2471
170
                                guardAddr.getPointer());
2472
7.11k
  } else {
2473
7.11k
    Builder.CreateStore(llvm::ConstantInt::get(guardTy, 1), guardAddr);
2474
7.11k
  }
2475
2476
7.28k
  CGF.EmitBlock(EndBlock);
2477
7.28k
}
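// A minimal illustrative sketch, not part of ItaniumCXXABI.cpp (all names
// below are invented): the guarded-initialization sequence built above is
// what gets emitted for a function-local static with a dynamic initializer.
namespace guarded_init_example {
struct Logger {
  Logger() {}              // user-provided, non-constexpr => dynamic init
};
Logger &getLogger() {
  static Logger instance;  // hidden guard variable; with thread-safe statics,
                           // __cxa_guard_acquire/__cxa_guard_release bracket
                           // the constructor call
  return instance;
}
} // namespace guarded_init_example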
2478
2479
/// Register a global destructor using __cxa_atexit.
2480
static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
2481
                                        llvm::FunctionCallee dtor,
2482
4.53k
                                        llvm::Constant *addr, bool TLS) {
2483
4.53k
  assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
2484
4.53k
         "__cxa_atexit is disabled");
2485
4.53k
  const char *Name = "__cxa_atexit";
2486
4.53k
  if (TLS) {
2487
120
    const llvm::Triple &T = CGF.getTarget().getTriple();
2488
100
    Name = T.isOSDarwin() ? "_tlv_atexit" : "__cxa_thread_atexit";
2489
120
  }
2490
2491
  // We're assuming that the destructor function is something we can
2492
  // reasonably call with the default CC.  Go ahead and cast it to the
2493
  // right prototype.
2494
4.53k
  llvm::Type *dtorTy =
2495
4.53k
    llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();
2496
2497
  // Preserve address space of addr.
2498
4.52k
  auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
2499
4.53k
  auto AddrInt8PtrTy =
2500
4.53k
      AddrAS ? CGF.Int8Ty->getPointerTo(AddrAS) : CGF.Int8PtrTy;
2501
2502
  // Create a variable that binds the atexit to this shared object.
2503
4.53k
  llvm::Constant *handle =
2504
4.53k
      CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
2505
4.53k
  auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
2506
4.53k
  GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
2507
2508
  // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
2509
4.53k
  llvm::Type *paramTys[] = {dtorTy, AddrInt8PtrTy, handle->getType()};
2510
4.53k
  llvm::FunctionType *atexitTy =
2511
4.53k
    llvm::FunctionType::get(CGF.IntTy, paramTys, false);
2512
2513
  // Fetch the actual function.
2514
4.53k
  llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
2515
4.53k
  if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee()))
2516
4.53k
    fn->setDoesNotThrow();
2517
2518
4.53k
  if (!addr)
2519
    // addr is null when we are trying to register a dtor annotated with
2520
    // __attribute__((destructor)) in a constructor function. Using null here is
2521
    // okay because this argument is just passed back to the destructor
2522
    // function.
2523
5
    addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);
2524
2525
4.53k
  llvm::Value *args[] = {llvm::ConstantExpr::getBitCast(
2526
4.53k
                             cast<llvm::Constant>(dtor.getCallee()), dtorTy),
2527
4.53k
                         llvm::ConstantExpr::getBitCast(addr, AddrInt8PtrTy),
2528
4.53k
                         handle};
2529
4.53k
  CGF.EmitNounwindRuntimeCall(atexit, args);
2530
4.53k
}
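// A minimal illustrative sketch (invented names; __cxa_atexit and __dso_handle
// are assumed to be provided by the C++ runtime, as the ABI requires): the
// registration emitted above is equivalent to this hand-written call.
extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
extern "C" void *__dso_handle;
namespace cxa_atexit_example {
int counter = 0;
void reportAtExit(void *) { /* runs during exit, reverse registration order */ }
const int registered = __cxa_atexit(&reportAtExit, &counter, &__dso_handle);
} // namespace cxa_atexit_example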
2531
2532
28.8k
void CodeGenModule::registerGlobalDtorsWithAtExit() {
2533
6
  for (const auto &I : DtorsUsingAtExit) {
2534
6
    int Priority = I.first;
2535
6
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
2536
2537
    // Create a function that registers destructors that have the same priority.
2538
    //
2539
    // Since constructor functions are run in non-descending order of their
2540
    // priorities, destructors are registered in non-descending order of their
2541
    // priorities, and since destructor functions are run in the reverse order
2542
    // of their registration, destructor functions are run in non-ascending
2543
    // order of their priorities.
2544
6
    CodeGenFunction CGF(*this);
2545
6
    std::string GlobalInitFnName =
2546
6
        std::string("__GLOBAL_init_") + llvm::to_string(Priority);
2547
6
    llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
2548
6
    llvm::Function *GlobalInitFn = CreateGlobalInitOrCleanUpFunction(
2549
6
        FTy, GlobalInitFnName, getTypes().arrangeNullaryFunction(),
2550
6
        SourceLocation());
2551
6
    ASTContext &Ctx = getContext();
2552
6
    QualType ReturnTy = Ctx.VoidTy;
2553
6
    QualType FunctionTy = Ctx.getFunctionType(ReturnTy, llvm::None, {});
2554
6
    FunctionDecl *FD = FunctionDecl::Create(
2555
6
        Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
2556
6
        &Ctx.Idents.get(GlobalInitFnName), FunctionTy, nullptr, SC_Static,
2557
6
        false, false);
2558
6
    CGF.StartFunction(GlobalDecl(FD), ReturnTy, GlobalInitFn,
2559
6
                      getTypes().arrangeNullaryFunction(), FunctionArgList(),
2560
6
                      SourceLocation(), SourceLocation());
2561
2562
10
    for (auto *Dtor : Dtors) {
2563
      // Register the destructor function calling __cxa_atexit if it is
2564
      // available. Otherwise fall back on calling atexit.
2565
10
      if (getCodeGenOpts().CXAAtExit)
2566
5
        emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false);
2567
5
      else
2568
5
        CGF.registerGlobalDtorWithAtExit(Dtor);
2569
10
    }
2570
2571
6
    CGF.FinishFunction();
2572
6
    AddGlobalCtor(GlobalInitFn, Priority, nullptr);
2573
6
  }
2574
28.8k
}
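// Illustrative sketch (invented name): the kind of source-level destructor
// function that lands in DtorsUsingAtExit and is registered through a
// per-priority __GLOBAL_init_NNN helper like the one built above, assuming
// the build asks for destructors to be registered via atexit/__cxa_atexit.
__attribute__((destructor(201))) static void example_teardown() {}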
2575
2576
/// Register a global destructor as best as we know how.
2577
void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
2578
                                       llvm::FunctionCallee dtor,
2579
4.55k
                                       llvm::Constant *addr) {
2580
4.55k
  if (D.isNoDestroy(CGM.getContext()))
2581
0
    return;
2582
2583
  // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
2584
  // or __cxa_atexit depending on whether this VarDecl is thread-local storage
2585
  // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
2586
  // We can always use __cxa_thread_atexit.
2587
4.55k
  if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind())
2588
4.52k
    return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
2589
2590
  // In Apple kexts, we want to add a global destructor entry.
2591
  // FIXME: shouldn't this be guarded by some variable?
2592
24
  if (CGM.getLangOpts().AppleKext) {
2593
    // Generate a global destructor entry.
2594
6
    return CGM.AddCXXDtorEntry(dtor, addr);
2595
6
  }
2596
2597
18
  CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
2598
18
}
2599
2600
static bool isThreadWrapperReplaceable(const VarDecl *VD,
2601
865
                                       CodeGen::CodeGenModule &CGM) {
2602
865
  assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
2603
  // Darwin prefers to have references to thread local variables to go through
2604
  // the thread wrapper instead of directly referencing the backing variable.
2605
865
  return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2606
865
         CGM.getTarget().getTriple().isOSDarwin();
2607
865
}
2608
2609
/// Get the appropriate linkage for the wrapper function. This is essentially
2610
/// the weak form of the variable's linkage; every translation unit which needs
2611
/// the wrapper emits a copy, and we want the linker to merge them.
2612
static llvm::GlobalValue::LinkageTypes
2613
239
getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
2614
239
  llvm::GlobalValue::LinkageTypes VarLinkage =
2615
239
      CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
2616
2617
  // For internal linkage variables, we don't need an external or weak wrapper.
2618
239
  if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
2619
14
    return VarLinkage;
2620
2621
  // If the thread wrapper is replaceable, give it appropriate linkage.
2622
225
  if (isThreadWrapperReplaceable(VD, CGM))
2623
58
    if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
2624
51
        !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
2625
44
      return VarLinkage;
2626
181
  return llvm::GlobalValue::WeakODRLinkage;
2627
181
}
2628
2629
llvm::Function *
2630
ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
2631
285
                                             llvm::Value *Val) {
2632
  // Mangle the name for the thread_local wrapper function.
2633
285
  SmallString<256> WrapperName;
2634
285
  {
2635
285
    llvm::raw_svector_ostream Out(WrapperName);
2636
285
    getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
2637
285
  }
2638
2639
  // FIXME: If VD is a definition, we should regenerate the function attributes
2640
  // before returning.
2641
285
  if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
2642
46
    return cast<llvm::Function>(V);
2643
2644
239
  QualType RetQT = VD->getType();
2645
239
  if (RetQT->isReferenceType())
2646
2
    RetQT = RetQT.getNonReferenceType();
2647
2648
239
  const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2649
239
      getContext().getPointerType(RetQT), FunctionArgList());
2650
2651
239
  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
2652
239
  llvm::Function *Wrapper =
2653
239
      llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
2654
239
                             WrapperName.str(), &CGM.getModule());
2655
2656
239
  if (CGM.supportsCOMDAT() && Wrapper->isWeakForLinker())
2657
167
    Wrapper->setComdat(CGM.getModule().getOrInsertComdat(Wrapper->getName()));
2658
2659
239
  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper);
2660
2661
  // Always resolve references to the wrapper at link time.
2662
239
  if (!Wrapper->hasLocalLinkage())
2663
225
    if (!isThreadWrapperReplaceable(VD, CGM) ||
2664
58
        llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) ||
2665
58
        llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage()) ||
2666
44
        VD->getVisibility() == HiddenVisibility)
2667
182
      Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);
2668
2669
239
  if (isThreadWrapperReplaceable(VD, CGM)) {
2670
60
    Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2671
60
    Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
2672
60
  }
2673
2674
239
  ThreadWrappers.push_back({VD, Wrapper});
2675
239
  return Wrapper;
2676
239
}
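// Illustrative sketch (invented names): odr-uses of a dynamically initialized
// thread_local go through the wrapper function created above (mangled as the
// variable's _ZTW* symbol).
namespace tls_wrapper_example {
struct Counter {
  Counter() { value = 0; }  // non-constexpr ctor => dynamic TLS initialization
  int value;
};
thread_local Counter tlsCounter;
int readCounter() { return tlsCounter.value; }  // reference routed via wrapper
} // namespace tls_wrapper_example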
2677
2678
void ItaniumCXXABI::EmitThreadLocalInitFuncs(
2679
    CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
2680
    ArrayRef<llvm::Function *> CXXThreadLocalInits,
2681
28.1k
    ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
2682
28.1k
  llvm::Function *InitFunc = nullptr;
2683
2684
  // Separate initializers into those with ordered (or partially-ordered)
2685
  // initialization and those with unordered initialization.
2686
28.1k
  llvm::SmallVector<llvm::Function *, 8> OrderedInits;
2687
28.1k
  llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
2688
28.3k
  for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
2689
167
    if (isTemplateInstantiation(
2690
167
            CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
2691
75
      UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
2692
75
          CXXThreadLocalInits[I];
2693
92
    else
2694
92
      OrderedInits.push_back(CXXThreadLocalInits[I]);
2695
167
  }
2696
2697
28.1k
  if (!OrderedInits.empty()) {
2698
    // Generate a guarded initialization function.
2699
49
    llvm::FunctionType *FTy =
2700
49
        llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
2701
49
    const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2702
49
    InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(FTy, "__tls_init", FI,
2703
49
                                                     SourceLocation(),
2704
49
                                                     /*TLS=*/true);
2705
49
    llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
2706
49
        CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
2707
49
        llvm::GlobalVariable::InternalLinkage,
2708
49
        llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
2709
49
    Guard->setThreadLocal(true);
2710
49
    Guard->setThreadLocalMode(CGM.GetDefaultLLVMTLSModel());
2711
2712
49
    CharUnits GuardAlign = CharUnits::One();
2713
49
    Guard->setAlignment(GuardAlign.getAsAlign());
2714
2715
49
    CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
2716
49
        InitFunc, OrderedInits, ConstantAddress(Guard, GuardAlign));
2717
    // On Darwin platforms, use CXX_FAST_TLS calling convention.
2718
49
    if (CGM.getTarget().getTriple().isOSDarwin()) {
2719
12
      InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2720
12
      InitFunc->addFnAttr(llvm::Attribute::NoUnwind);
2721
12
    }
2722
49
  }
2723
2724
  // Create declarations for thread wrappers for all thread-local variables
2725
  // with non-discardable definitions in this translation unit.
2726
306
  for (const VarDecl *VD : CXXThreadLocals) {
2727
306
    if (VD->hasDefinition() &&
2728
247
        !isDiscardableGVALinkage(getContext().GetGVALinkageForVariable(VD))) {
2729
128
      llvm::GlobalValue *GV = CGM.GetGlobalValue(CGM.getMangledName(VD));
2730
128
      getOrCreateThreadLocalWrapper(VD, GV);
2731
128
    }
2732
306
  }
2733
2734
  // Emit all referenced thread wrappers.
2735
239
  for (auto VDAndWrapper : ThreadWrappers) {
2736
239
    const VarDecl *VD = VDAndWrapper.first;
2737
239
    llvm::GlobalVariable *Var =
2738
239
        cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
2739
239
    llvm::Function *Wrapper = VDAndWrapper.second;
2740
2741
    // Some targets require that all access to thread local variables go through
2742
    // the thread wrapper.  This means that we cannot attempt to create a thread
2743
    // wrapper or a thread helper.
2744
239
    if (!VD->hasDefinition()) {
2745
57
      if (isThreadWrapperReplaceable(VD, CGM)) {
2746
13
        Wrapper->setLinkage(llvm::Function::ExternalLinkage);
2747
13
        continue;
2748
13
      }
2749
2750
      // If this isn't a TU in which this variable is defined, the thread
2751
      // wrapper is discardable.
2752
44
      if (Wrapper->getLinkage() == llvm::Function::WeakODRLinkage)
2753
44
        Wrapper->setLinkage(llvm::Function::LinkOnceODRLinkage);
2754
44
    }
2755
2756
226
    CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);
2757
2758
    // Mangle the name for the thread_local initialization function.
2759
226
    SmallString<256> InitFnName;
2760
226
    {
2761
226
      llvm::raw_svector_ostream Out(InitFnName);
2762
226
      getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
2763
226
    }
2764
2765
226
    llvm::FunctionType *InitFnTy = llvm::FunctionType::get(CGM.VoidTy, false);
2766
2767
    // If we have a definition for the variable, emit the initialization
2768
    // function as an alias to the global Init function (if any). Otherwise,
2769
    // produce a declaration of the initialization function.
2770
226
    llvm::GlobalValue *Init = nullptr;
2771
226
    bool InitIsInitFunc = false;
2772
226
    bool HasConstantInitialization = false;
2773
226
    if (!usesThreadWrapperFunction(VD)) {
2774
61
      HasConstantInitialization = true;
2775
165
    } else if (VD->hasDefinition()) {
2776
121
      InitIsInitFunc = true;
2777
121
      llvm::Function *InitFuncToUse = InitFunc;
2778
121
      if (isTemplateInstantiation(VD->getTemplateSpecializationKind()))
2779
39
        InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
2780
121
      if (InitFuncToUse)
2781
119
        Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
2782
119
                                         InitFuncToUse);
2783
44
    } else {
2784
      // Emit a weak global function referring to the initialization function.
2785
      // This function will not exist if the TU defining the thread_local
2786
      // variable in question does not need any dynamic initialization for
2787
      // its thread_local variables.
2788
44
      Init = llvm::Function::Create(InitFnTy,
2789
44
                                    llvm::GlobalVariable::ExternalWeakLinkage,
2790
44
                                    InitFnName.str(), &CGM.getModule());
2791
44
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2792
44
      CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI,
2793
44
                                    cast<llvm::Function>(Init));
2794
44
    }
2795
2796
226
    if (Init) {
2797
163
      Init->setVisibility(Var->getVisibility());
2798
      // Don't mark an extern_weak function DSO local on windows.
2799
163
      if (!CGM.getTriple().isOSWindows() || !Init->hasExternalWeakLinkage())
2800
161
        Init->setDSOLocal(Var->isDSOLocal());
2801
163
    }
2802
2803
226
    llvm::LLVMContext &Context = CGM.getModule().getContext();
2804
226
    llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
2805
226
    CGBuilderTy Builder(CGM, Entry);
2806
226
    if (HasConstantInitialization) {
2807
      // No dynamic initialization to invoke.
2808
165
    } else if (InitIsInitFunc) {
2809
121
      if (Init) {
2810
119
        llvm::CallInst *CallVal = Builder.CreateCall(InitFnTy, Init);
2811
119
        if (isThreadWrapperReplaceable(VD, CGM)) {
2812
27
          CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2813
27
          llvm::Function *Fn =
2814
27
              cast<llvm::Function>(cast<llvm::GlobalAlias>(Init)->getAliasee());
2815
27
          Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2816
27
        }
2817
119
      }
2818
44
    } else {
2819
      // Don't know whether we have an init function. Call it if it exists.
2820
44
      llvm::Value *Have = Builder.CreateIsNotNull(Init);
2821
44
      llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
2822
44
      llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
2823
44
      Builder.CreateCondBr(Have, InitBB, ExitBB);
2824
2825
44
      Builder.SetInsertPoint(InitBB);
2826
44
      Builder.CreateCall(InitFnTy, Init);
2827
44
      Builder.CreateBr(ExitBB);
2828
2829
44
      Builder.SetInsertPoint(ExitBB);
2830
44
    }
2831
2832
    // For a reference, the result of the wrapper function is a pointer to
2833
    // the referenced object.
2834
226
    llvm::Value *Val = Var;
2835
226
    if (VD->getType()->isReferenceType()) {
2836
2
      CharUnits Align = CGM.getContext().getDeclAlign(VD);
2837
2
      Val = Builder.CreateAlignedLoad(Val, Align);
2838
2
    }
2839
226
    if (Val->getType() != Wrapper->getReturnType())
2840
0
      Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
2841
0
          Val, Wrapper->getReturnType(), "");
2842
226
    Builder.CreateRet(Val);
2843
226
  }
2844
28.1k
}
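// Illustrative sketch (invented names) of the ordered/unordered split handled
// above: an ordinary thread_local gets a guarded entry in __tls_init, while a
// thread_local produced by template instantiation is initialized unordered.
namespace tls_init_example {
struct Tracer { Tracer() {} };                    // forces dynamic TLS init
thread_local Tracer ordered;                      // initialized via __tls_init
template <typename T> struct Holder {
  static thread_local Tracer member;
};
template <typename T> thread_local Tracer Holder<T>::member;
Tracer *touch() { return &Holder<int>::member; }  // instantiation => unordered
} // namespace tls_init_example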
2845
2846
LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
2847
                                                   const VarDecl *VD,
2848
157
                                                   QualType LValType) {
2849
157
  llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
2850
157
  llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
2851
2852
157
  llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
2853
157
  CallVal->setCallingConv(Wrapper->getCallingConv());
2854
2855
157
  LValue LV;
2856
157
  if (VD->getType()->isReferenceType())
2857
2
    LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
2858
155
  else
2859
155
    LV = CGF.MakeAddrLValue(CallVal, LValType,
2860
155
                            CGF.getContext().getDeclAlign(VD));
2861
  // FIXME: need setObjCGCLValueClass?
2862
157
  return LV;
2863
157
}
2864
2865
/// Return whether the given global decl needs a VTT parameter, which it does
2866
/// if it's a base constructor or destructor with virtual bases.
2867
144k
bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
2868
144k
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
2869
2870
  // We don't have any virtual bases, just return early.
2871
144k
  if (!MD->getParent()->getNumVBases())
2872
141k
    return false;
2873
2874
  // Check if we have a base constructor.
2875
2.96k
  if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
2876
950
    return true;
2877
2878
  // Check if we have a base destructor.
2879
2.01k
  if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
2880
386
    return true;
2881
2882
1.63k
  return false;
2883
1.63k
}
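// Illustrative sketch (invented names): Mid's base-object constructor takes a
// VTT parameter because Mid has a virtual base; Most's complete-object
// constructor passes it the appropriate VTT slice.
namespace vtt_example {
struct VBase { int x = 0; };
struct Mid : virtual VBase {};
struct Most : Mid {};
} // namespace vtt_example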
2884
2885
namespace {
2886
class ItaniumRTTIBuilder {
2887
  CodeGenModule &CGM;  // Per-module state.
2888
  llvm::LLVMContext &VMContext;
2889
  const ItaniumCXXABI &CXXABI;  // Per-module state.
2890
2891
  /// Fields - The fields of the RTTI descriptor currently being built.
2892
  SmallVector<llvm::Constant *, 16> Fields;
2893
2894
  /// GetAddrOfTypeName - Returns the mangled type name of the given type.
2895
  llvm::GlobalVariable *
2896
  GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);
2897
2898
  /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
2899
  /// descriptor of the given type.
2900
  llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
2901
2902
  /// BuildVTablePointer - Build the vtable pointer for the given type.
2903
  void BuildVTablePointer(const Type *Ty);
2904
2905
  /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
2906
  /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
2907
  void BuildSIClassTypeInfo(const CXXRecordDecl *RD);
2908
2909
  /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
2910
  /// classes with bases that do not satisfy the abi::__si_class_type_info
2911
  /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
2912
  void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);
2913
2914
  /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
2915
  /// for pointer types.
2916
  void BuildPointerTypeInfo(QualType PointeeTy);
2917
2918
  /// BuildObjCObjectTypeInfo - Build the appropriate kind of
2919
  /// type_info for an object type.
2920
  void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);
2921
2922
  /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
2923
  /// struct, used for member pointer types.
2924
  void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);
2925
2926
public:
2927
  ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
2928
5.05k
      : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}
2929
2930
  // Pointer type info flags.
2931
  enum {
2932
    /// PTI_Const - Type has const qualifier.
2933
    PTI_Const = 0x1,
2934
2935
    /// PTI_Volatile - Type has volatile qualifier.
2936
    PTI_Volatile = 0x2,
2937
2938
    /// PTI_Restrict - Type has restrict qualifier.
2939
    PTI_Restrict = 0x4,
2940
2941
    /// PTI_Incomplete - Type is incomplete.
2942
    PTI_Incomplete = 0x8,
2943
2944
    /// PTI_ContainingClassIncomplete - Containing class is incomplete.
2945
    /// (in pointer to member).
2946
    PTI_ContainingClassIncomplete = 0x10,
2947
2948
    /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
2949
    //PTI_TransactionSafe = 0x20,
2950
2951
    /// PTI_Noexcept - Pointee is noexcept function (C++1z).
2952
    PTI_Noexcept = 0x40,
2953
  };
2954
2955
  // VMI type info flags.
2956
  enum {
2957
    /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
2958
    VMI_NonDiamondRepeat = 0x1,
2959
2960
    /// VMI_DiamondShaped - Class is diamond shaped.
2961
    VMI_DiamondShaped = 0x2
2962
  };
2963
2964
  // Base class type info flags.
2965
  enum {
2966
    /// BCTI_Virtual - Base class is virtual.
2967
    BCTI_Virtual = 0x1,
2968
2969
    /// BCTI_Public - Base class is public.
2970
    BCTI_Public = 0x2
2971
  };
2972
2973
  /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
2974
  /// link to an existing RTTI descriptor if one already exists.
2975
  llvm::Constant *BuildTypeInfo(QualType Ty);
2976
2977
  /// BuildTypeInfo - Build the RTTI type info struct for the given type.
2978
  llvm::Constant *BuildTypeInfo(
2979
      QualType Ty,
2980
      llvm::GlobalVariable::LinkageTypes Linkage,
2981
      llvm::GlobalValue::VisibilityTypes Visibility,
2982
      llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass);
2983
};
2984
}
2985
2986
llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
2987
2.56k
    QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
2988
2.56k
  SmallString<256> Name;
2989
2.56k
  llvm::raw_svector_ostream Out(Name);
2990
2.56k
  CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
2991
2992
  // We know that the mangled name of the type starts at index 4 of the
2993
  // mangled name of the typename, so we can just index into it in order to
2994
  // get the mangled name of the type.
2995
2.56k
  llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
2996
2.56k
                                                            Name.substr(4));
2997
2.56k
  auto Align = CGM.getContext().getTypeAlignInChars(CGM.getContext().CharTy);
2998
2999
2.56k
  llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
3000
2.56k
      Name, Init->getType(), Linkage, Align.getQuantity());
3001
3002
2.56k
  GV->setInitializer(Init);
3003
3004
2.56k
  return GV;
3005
2.56k
}
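// Illustrative only (the symbol below is hypothetical): Name.substr(4) above
// relies on the type's own mangling beginning right after the four-character
// "_ZTS" prefix of the type-name symbol.
static const char *exampleTypeNameSymbol = "_ZTS7Example";          // typeinfo name
static const char *exampleTypeMangling = exampleTypeNameSymbol + 4; // "7Example"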
3006
3007
llvm::Constant *
3008
1.15k
ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
3009
  // Mangle the RTTI name.
3010
1.15k
  SmallString<256> Name;
3011
1.15k
  llvm::raw_svector_ostream Out(Name);
3012
1.15k
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3013
3014
  // Look for an existing global.
3015
1.15k
  llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
3016
3017
1.15k
  if (!GV) {
3018
    // Create a new global variable.
3019
    // Note for the future: If we would ever like to do deferred emission of
3020
    // RTTI, check if emitting vtables opportunistically need any adjustment.
3021
3022
818
    GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
3023
818
                                  /*isConstant=*/true,
3024
818
                                  llvm::GlobalValue::ExternalLinkage, nullptr,
3025
818
                                  Name);
3026
818
    const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
3027
818
    CGM.setGVProperties(GV, RD);
3028
818
  }
3029
3030
1.15k
  return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
3031
1.15k
}
3032
3033
/// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
3034
/// info for that type is defined in the standard library.
3035
299
static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
3036
  // Itanium C++ ABI 2.9.2:
3037
  //   Basic type information (e.g. for "int", "bool", etc.) will be kept in
3038
  //   the run-time support library. Specifically, the run-time support
3039
  //   library should contain type_info objects for the types X, X* and
3040
  //   X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
3041
  //   unsigned char, signed char, short, unsigned short, int, unsigned int,
3042
  //   long, unsigned long, long long, unsigned long long, float, double,
3043
  //   long double, char16_t, char32_t, and the IEEE 754r decimal and
3044
  //   half-precision floating point types.
3045
  //
3046
  // GCC also emits RTTI for __int128.
3047
  // FIXME: We do not emit RTTI information for decimal types here.
3048
3049
  // Types added here must also be added to EmitFundamentalRTTIDescriptors.
3050
299
  switch (Ty->getKind()) {
3051
273
    case BuiltinType::Void:
3052
273
    case BuiltinType::NullPtr:
3053
273
    case BuiltinType::Bool:
3054
273
    case BuiltinType::WChar_S:
3055
273
    case BuiltinType::WChar_U:
3056
273
    case BuiltinType::Char_U:
3057
273
    case BuiltinType::Char_S:
3058
273
    case BuiltinType::UChar:
3059
273
    case BuiltinType::SChar:
3060
273
    case BuiltinType::Short:
3061
273
    case BuiltinType::UShort:
3062
273
    case BuiltinType::Int:
3063
273
    case BuiltinType::UInt:
3064
273
    case BuiltinType::Long:
3065
273
    case BuiltinType::ULong:
3066
273
    case BuiltinType::LongLong:
3067
273
    case BuiltinType::ULongLong:
3068
273
    case BuiltinType::Half:
3069
273
    case BuiltinType::Float:
3070
273
    case BuiltinType::Double:
3071
273
    case BuiltinType::LongDouble:
3072
273
    case BuiltinType::Float16:
3073
273
    case BuiltinType::Float128:
3074
273
    case BuiltinType::Char8:
3075
273
    case BuiltinType::Char16:
3076
273
    case BuiltinType::Char32:
3077
273
    case BuiltinType::Int128:
3078
273
    case BuiltinType::UInt128:
3079
273
      return true;
3080
3081
273
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
3082
936
    case BuiltinType::Id:
3083
936
#include "clang/Basic/OpenCLImageTypes.def"
3084
936
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
3085
312
    case BuiltinType::Id:
3086
312
#include "clang/Basic/OpenCLExtensionTypes.def"
3087
26
    case BuiltinType::OCLSampler:
3088
26
    case BuiltinType::OCLEvent:
3089
26
    case BuiltinType::OCLClkEvent:
3090
26
    case BuiltinType::OCLQueue:
3091
26
    case BuiltinType::OCLReserveID:
3092
26
#define SVE_TYPE(Name, Id, SingletonId) \
3093
1.27k
    case BuiltinType::Id:
3094
1.27k
#include "clang/Basic/AArch64SVEACLETypes.def"
3095
26
    case BuiltinType::ShortAccum:
3096
26
    case BuiltinType::Accum:
3097
26
    case BuiltinType::LongAccum:
3098
26
    case BuiltinType::UShortAccum:
3099
26
    case BuiltinType::UAccum:
3100
26
    case BuiltinType::ULongAccum:
3101
26
    case BuiltinType::ShortFract:
3102
26
    case BuiltinType::Fract:
3103
26
    case BuiltinType::LongFract:
3104
26
    case BuiltinType::UShortFract:
3105
26
    case BuiltinType::UFract:
3106
26
    case BuiltinType::ULongFract:
3107
26
    case BuiltinType::SatShortAccum:
3108
26
    case BuiltinType::SatAccum:
3109
26
    case BuiltinType::SatLongAccum:
3110
26
    case BuiltinType::SatUShortAccum:
3111
26
    case BuiltinType::SatUAccum:
3112
26
    case BuiltinType::SatULongAccum:
3113
26
    case BuiltinType::SatShortFract:
3114
26
    case BuiltinType::SatFract:
3115
26
    case BuiltinType::SatLongFract:
3116
26
    case BuiltinType::SatUShortFract:
3117
26
    case BuiltinType::SatUFract:
3118
26
    case BuiltinType::SatULongFract:
3119
26
    case BuiltinType::BFloat16:
3120
26
      return false;
3121
3122
0
    case BuiltinType::Dependent:
3123
0
#define BUILTIN_TYPE(Id, SingletonId)
3124
0
#define PLACEHOLDER_TYPE(Id, SingletonId) \
3125
0
    case BuiltinType::Id:
3126
0
#include "clang/AST/BuiltinTypes.def"
3127
0
      llvm_unreachable("asking for RRTI for a placeholder type!");
3128
3129
0
    case BuiltinType::ObjCId:
3130
0
    case BuiltinType::ObjCClass:
3131
0
    case BuiltinType::ObjCSel:
3132
0
      llvm_unreachable("FIXME: Objective-C types are unsupported!");
3133
0
  }
3134
3135
0
  llvm_unreachable("Invalid BuiltinType Kind!");
3136
0
}
3137
3138
60
static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
3139
60
  QualType PointeeTy = PointerTy->getPointeeType();
3140
60
  const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
3141
60
  if (!BuiltinTy)
3142
48
    return false;
3143
3144
  // Check the qualifiers.
3145
12
  Qualifiers Quals = PointeeTy.getQualifiers();
3146
12
  Quals.removeConst();
3147
3148
12
  if (!Quals.empty())
3149
0
    return false;
3150
3151
12
  return TypeInfoIsInStandardLibrary(BuiltinTy);
3152
12
}
3153
3154
/// IsStandardLibraryRTTIDescriptor - Returns whether the type
3155
/// information for the given type exists in the standard library.
3156
3.42k
static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
3157
  // Type info for builtin types is defined in the standard library.
3158
3.42k
  if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
3159
287
    return TypeInfoIsInStandardLibrary(BuiltinTy);
3160
3161
  // Type info for some pointer types to builtin types is defined in the
3162
  // standard library.
3163
3.13k
  if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3164
60
    return TypeInfoIsInStandardLibrary(PointerTy);
3165
3166
3.07k
  return false;
3167
3.07k
}
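// Illustrative only: typeid applied to a builtin type, a pointer to one, or a
// pointer to a const-qualified one resolves to a type_info object supplied by
// the C++ runtime, which is why the checks above avoid emitting it locally.
#include <typeinfo>
namespace std_rtti_example {
const std::type_info &ti_int = typeid(int);
const std::type_info &ti_ptr = typeid(int *);
const std::type_info &ti_cptr = typeid(const int *);
} // namespace std_rtti_example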
3168
3169
/// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
3170
/// the given type exists somewhere else, and that we should not emit the type
3171
/// information in this translation unit.  Assumes that it is not a
3172
/// standard-library type.
3173
static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
3174
3.14k
                                            QualType Ty) {
3175
3.14k
  ASTContext &Context = CGM.getContext();
3176
3177
  // If RTTI is disabled, assume it might be disabled in the
3178
  // translation unit that defines any potential key function, too.
3179
3.14k
  if (!Context.getLangOpts().RTTI) return false;
3180
3181
3.14k
  if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3182
2.86k
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
3183
2.86k
    if (!RD->hasDefinition())
3184
3
      return false;
3185
3186
2.85k
    if (!RD->isDynamicClass())
3187
296
      return false;
3188
3189
    // FIXME: this may need to be reconsidered if the key function
3190
    // changes.
3191
    // N.B. We must always emit the RTTI data ourselves if there exists a key
3192
    // function.
3193
2.56k
    bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
3194
3195
    // Don't import the RTTI but emit it locally.
3196
2.56k
    if (CGM.getTriple().isWindowsGNUEnvironment())
3197
88
      return false;
3198
3199
2.47k
    if (CGM.getVTables().isVTableExternal(RD))
3200
882
      return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
3201
0
                 ? false
3202
882
                 : true;
3203
3204
1.59k
    if (IsDLLImport)
3205
0
      return true;
3206
1.87k
  }
3207
3208
1.87k
  return false;
3209
1.87k
}
3210
3211
/// IsIncompleteClassType - Returns whether the given record type is incomplete.
3212
2.10k
static bool IsIncompleteClassType(const RecordType *RecordTy) {
3213
2.10k
  return !RecordTy->getDecl()->isCompleteDefinition();
3214
2.10k
}
3215
3216
/// ContainsIncompleteClassType - Returns whether the given type contains an
3217
/// incomplete class type. This is true if
3218
///
3219
///   * The given type is an incomplete class type.
3220
///   * The given type is a pointer type whose pointee type contains an
3221
///     incomplete class type.
3222
///   * The given type is a member pointer type whose class is an incomplete
3223
///     class type.
3224
///   * The given type is a member pointer type whoise pointee type contains an
3225
///     incomplete class type.
3226
/// is an indirect or direct pointer to an incomplete class type.
3227
2.62k
static bool ContainsIncompleteClassType(QualType Ty) {
3228
2.62k
  if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3229
2.05k
    if (IsIncompleteClassType(RecordTy))
3230
32
      return true;
3231
2.59k
  }
3232
3233
2.59k
  if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3234
68
    return ContainsIncompleteClassType(PointerTy->getPointeeType());
3235
3236
2.52k
  if (const MemberPointerType *MemberPointerTy =
3237
25
      dyn_cast<MemberPointerType>(Ty)) {
3238
    // Check if the class type is incomplete.
3239
25
    const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
3240
25
    if (IsIncompleteClassType(ClassType))
3241
12
      return true;
3242
3243
13
    return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
3244
13
  }
3245
3246
2.49k
  return false;
3247
2.49k
}
3248
3249
// CanUseSingleInheritance - Return whether the given record decl has a "single,
3250
// public, non-virtual base at offset zero (i.e. the derived class is dynamic
3251
// iff the base is)", according to Itanium C++ ABI, 2.95p6b.
3252
2.02k
static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
3253
  // Check the number of bases.
3254
2.02k
  if (RD->getNumBases() != 1)
3255
420
    return false;
3256
3257
  // Get the base.
3258
1.60k
  CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
3259
3260
  // Check that the base is not virtual.
3261
1.60k
  if (Base->isVirtual())
3262
386
    return false;
3263
3264
  // Check that the base is public.
3265
1.21k
  if (Base->getAccessSpecifier() != AS_public)
3266
50
    return false;
3267
3268
  // Check that the class is dynamic iff the base is.
3269
1.16k
  auto *BaseDecl =
3270
1.16k
      cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3271
1.16k
  if (!BaseDecl->isEmpty() &&
3272
1.10k
      BaseDecl->isDynamicClass() != RD->isDynamicClass())
3273
6
    return false;
3274
3275
1.16k
  return true;
3276
1.16k
}
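// Illustrative hierarchies (invented names) for the predicate above:
namespace si_example {
struct Base { virtual ~Base() {} };
struct Single : Base {};        // one public non-virtual base => si_class
struct Other { virtual ~Other() {} };
struct Multi : Base, Other {};  // more than one base => vmi_class
struct Virt : virtual Base {};  // virtual base => vmi_class
} // namespace si_example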
3277
3278
2.56k
void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
3279
  // abi::__class_type_info.
3280
2.56k
  static const char * const ClassTypeInfo =
3281
2.56k
    "_ZTVN10__cxxabiv117__class_type_infoE";
3282
  // abi::__si_class_type_info.
3283
2.56k
  static const char * const SIClassTypeInfo =
3284
2.56k
    "_ZTVN10__cxxabiv120__si_class_type_infoE";
3285
  // abi::__vmi_class_type_info.
3286
2.56k
  static const char * const VMIClassTypeInfo =
3287
2.56k
    "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
3288
3289
2.56k
  const char *VTableName = nullptr;
3290
3291
2.56k
  switch (Ty->getTypeClass()) {
3292
0
#define TYPE(Class, Base)
3293
0
#define ABSTRACT_TYPE(Class, Base)
3294
0
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3295
0
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3296
0
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
3297
0
#include "clang/AST/TypeNodes.inc"
3298
0
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3299
3300
0
  case Type::LValueReference:
3301
0
  case Type::RValueReference:
3302
0
    llvm_unreachable("References shouldn't get here");
3303
3304
0
  case Type::Auto:
3305
0
  case Type::DeducedTemplateSpecialization:
3306
0
    llvm_unreachable("Undeduced type shouldn't get here");
3307
3308
0
  case Type::Pipe:
3309
0
    llvm_unreachable("Pipe types shouldn't get here");
3310
3311
200
  case Type::Builtin:
3312
200
  case Type::ExtInt:
3313
  // GCC treats vector and complex types as fundamental types.
3314
200
  case Type::Vector:
3315
200
  case Type::ExtVector:
3316
200
  case Type::ConstantMatrix:
3317
200
  case Type::Complex:
3318
200
  case Type::Atomic:
3319
  // FIXME: GCC treats block pointers as fundamental types?!
3320
200
  case Type::BlockPointer:
3321
    // abi::__fundamental_type_info.
3322
200
    VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
3323
200
    break;
3324
3325
5
  case Type::ConstantArray:
3326
5
  case Type::IncompleteArray:
3327
5
  case Type::VariableArray:
3328
    // abi::__array_type_info.
3329
5
    VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
3330
5
    break;
3331
3332
87
  case Type::FunctionNoProto:
3333
87
  case Type::FunctionProto:
3334
    // abi::__function_type_info.
3335
87
    VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
3336
87
    break;
3337
3338
2
  case Type::Enum:
3339
    // abi::__enum_type_info.
3340
2
    VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
3341
2
    break;
3342
3343
1.98k
  case Type::Record: {
3344
1.98k
    const CXXRecordDecl *RD =
3345
1.98k
      cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3346
3347
1.98k
    if (!RD->hasDefinition() || !RD->getNumBases()) {
3348
974
      VTableName = ClassTypeInfo;
3349
1.01k
    } else if (CanUseSingleInheritance(RD)) {
3350
580
      VTableName = SIClassTypeInfo;
3351
431
    } else {
3352
431
      VTableName = VMIClassTypeInfo;
3353
431
    }
3354
3355
1.98k
    break;
3356
87
  }
3357
3358
7
  case Type::ObjCObject:
3359
    // Ignore protocol qualifiers.
3360
7
    Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();
3361
3362
    // Handle id and Class.
3363
7
    if (isa<BuiltinType>(Ty)) {
3364
6
      VTableName = ClassTypeInfo;
3365
6
      break;
3366
6
    }
3367
3368
1
    assert(isa<ObjCInterfaceType>(Ty));
3369
1
    LLVM_FALLTHROUGH;
3370
3371
3
  case Type::ObjCInterface:
3372
3
    if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
3373
1
      VTableName = SIClassTypeInfo;
3374
2
    } else {
3375
2
      VTableName = ClassTypeInfo;
3376
2
    }
3377
3
    break;
3378
3379
256
  case Type::ObjCObjectPointer:
3380
256
  case Type::Pointer:
3381
    // abi::__pointer_type_info.
3382
256
    VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
3383
256
    break;
3384
3385
21
  case Type::MemberPointer:
3386
    // abi::__pointer_to_member_type_info.
3387
21
    VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
3388
21
    break;
3389
2.56k
  }
3390
3391
2.56k
  llvm::Constant *VTable = nullptr;
3392
3393
  // Check if the alias exists. If it doesn't, then get or create the global.
3394
2.56k
  if (CGM.getItaniumVTableContext().isRelativeLayout())
3395
45
    VTable = CGM.getModule().getNamedAlias(VTableName);
3396
2.56k
  if (!VTable)
3397
2.56k
    VTable = CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
3398
3399
2.56k
  CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));
3400
3401
2.56k
  llvm::Type *PtrDiffTy =
3402
2.56k
      CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
3403
3404
  // The vtable address point is 2.
3405
2.56k
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
3406
    // The vtable address point is 8 bytes after its start:
3407
    // 4 for the offset to top + 4 for the relative offset to rtti.
3408
45
    llvm::Constant *Eight = llvm::ConstantInt::get(CGM.Int32Ty, 8);
3409
45
    VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
3410
45
    VTable =
3411
45
        llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8Ty, VTable, Eight);
3412
2.52k
  } else {
3413
2.52k
    llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
3414
2.52k
    VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable,
3415
2.52k
                                                          Two);
3416
2.52k
  }
3417
2.56k
  VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
3418
3419
2.56k
  Fields.push_back(VTable);
3420
2.56k
}
3421
3422
/// Return the linkage that the type info and type info name constants
3423
/// should have for the given type.
3424
static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
3425
2.26k
                                                             QualType Ty) {
3426
  // Itanium C++ ABI 2.9.5p7:
3427
  //   In addition, it and all of the intermediate abi::__pointer_type_info
3428
  //   structs in the chain down to the abi::__class_type_info for the
3429
  //   incomplete class type must be prevented from resolving to the
3430
  //   corresponding type_info structs for the complete class type, possibly
3431
  //   by making them local static objects. Finally, a dummy class RTTI is
3432
  //   generated for the incomplete type that will not resolve to the final
3433
  //   complete class RTTI (because the latter need not exist), possibly by
3434
  //   making it a local static object.
3435
2.26k
  if (ContainsIncompleteClassType(Ty))
3436
25
    return llvm::GlobalValue::InternalLinkage;
3437
3438
2.24k
  switch (Ty->getLinkage()) {
3439
137
  case NoLinkage:
3440
137
  case InternalLinkage:
3441
137
  case UniqueExternalLinkage:
3442
137
    return llvm::GlobalValue::InternalLinkage;
3443
3444
2.10k
  case VisibleNoLinkage:
3445
2.10k
  case ModuleInternalLinkage:
3446
2.10k
  case ModuleLinkage:
3447
2.10k
  case ExternalLinkage:
3448
    // RTTI is not enabled, which means that this type info struct is going
3449
    // to be used for exception handling. Give it linkonce_odr linkage.
3450
2.10k
    if (!CGM.getLangOpts().RTTI)
3451
7
      return llvm::GlobalValue::LinkOnceODRLinkage;
3452
3453
2.09k
    if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
3454
1.85k
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
3455
1.85k
      if (RD->hasAttr<WeakAttr>())
3456
5
        return llvm::GlobalValue::WeakODRLinkage;
3457
1.85k
      if (CGM.getTriple().isWindowsItaniumEnvironment())
3458
9
        if (RD->hasAttr<DLLImportAttr>() &&
3459
1
            ShouldUseExternalRTTIDescriptor(CGM, Ty))
3460
0
          return llvm::GlobalValue::ExternalLinkage;
3461
      // MinGW always uses LinkOnceODRLinkage for type info.
3462
1.85k
      if (RD->isDynamicClass() &&
3463
1.58k
          !CGM.getContext()
3464
1.58k
               .getTargetInfo()
3465
1.58k
               .getTriple()
3466
1.58k
               .isWindowsGNUEnvironment())
3467
1.49k
        return CGM.getVTableLinkage(RD);
3468
597
    }
3469
3470
597
    return llvm::GlobalValue::LinkOnceODRLinkage;
3471
0
  }
3472
3473
0
  llvm_unreachable("Invalid linkage!");
3474
0
}
3475
3476
4.75k
llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
3477
  // We want to operate on the canonical type.
3478
4.75k
  Ty = Ty.getCanonicalType();
3479
3480
  // Check if we've already emitted an RTTI descriptor for this type.
3481
4.75k
  SmallString<256> Name;
3482
4.75k
  llvm::raw_svector_ostream Out(Name);
3483
4.75k
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3484
3485
4.75k
  llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
3486
4.75k
  if (OldGV && !OldGV->isDeclaration()) {
3487
1.33k
    assert(!OldGV->hasAvailableExternallyLinkage() &&
3488
1.33k
           "available_externally typeinfos not yet implemented");
3489
3490
1.33k
    return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
3491
1.33k
  }
3492
3493
  // Check if there is already an external RTTI descriptor for this type.
3494
3.42k
  if (IsStandardLibraryRTTIDescriptor(Ty) ||
3495
3.14k
      ShouldUseExternalRTTIDescriptor(CGM, Ty))
3496
1.15k
    return GetAddrOfExternalRTTIDescriptor(Ty);
3497
3498
  // Compute the linkage with which to emit this type's RTTI data and name.
3499
2.26k
  llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);
3500
3501
  // Give the type_info object and name the formal visibility of the
3502
  // type itself.
3503
2.26k
  llvm::GlobalValue::VisibilityTypes llvmVisibility;
3504
2.26k
  if (llvm::GlobalValue::isLocalLinkage(Linkage))
3505
    // If the linkage is local, only default visibility makes sense.
3506
164
    llvmVisibility = llvm::GlobalValue::DefaultVisibility;
3507
2.10k
  else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) ==
3508
2.10k
           ItaniumCXXABI::RUK_NonUniqueHidden)
3509
6
    llvmVisibility = llvm::GlobalValue::HiddenVisibility;
3510
2.09k
  else
3511
2.09k
    llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());
3512
3513
2.26k
  llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
3514
2.26k
      llvm::GlobalValue::DefaultStorageClass;
3515
2.26k
  if (CGM.getTriple().isWindowsItaniumEnvironment()) {
3516
10
    auto RD = Ty->getAsCXXRecordDecl();
3517
10
    if (RD && RD->hasAttr<DLLExportAttr>())
3518
4
      DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
3519
10
  }
3520
3521
2.26k
  return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass);
3522
2.26k
}
3523
3524
llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
3525
      QualType Ty,
3526
      llvm::GlobalVariable::LinkageTypes Linkage,
3527
      llvm::GlobalValue::VisibilityTypes Visibility,
3528
2.56k
      llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
3529
  // Add the vtable pointer.
3530
2.56k
  BuildVTablePointer(cast<Type>(Ty));
3531
3532
  // And the name.
3533
2.56k
  llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
3534
2.56k
  llvm::Constant *TypeNameField;
3535
3536
  // If we're supposed to demote the visibility, be sure to set a flag
3537
  // to use a string comparison for type_info comparisons.
3538
2.56k
  ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
3539
2.56k
      CXXABI.classifyRTTIUniqueness(Ty, Linkage);
3540
2.56k
  if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
3541
    // The flag is the sign bit, which on ARM64 is defined to be clear
3542
    // for global pointers.  This is very ARM64-specific.
3543
8
    TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
3544
8
    llvm::Constant *flag =
3545
8
        llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
3546
8
    TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
3547
8
    TypeNameField =
3548
8
        llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
3549
2.55k
  } else {
3550
2.55k
    TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
3551
2.55k
  }
3552
2.56k
  Fields.push_back(TypeNameField);
3553
3554
2.56k
  switch (Ty->getTypeClass()) {
3555
0
#define TYPE(Class, Base)
3556
0
#define ABSTRACT_TYPE(Class, Base)
3557
0
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3558
0
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3559
0
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
3560
0
#include "clang/AST/TypeNodes.inc"
3561
0
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3562
3563
  // GCC treats vector types as fundamental types.
3564
194
  case Type::Builtin:
3565
194
  case Type::Vector:
3566
194
  case Type::ExtVector:
3567
194
  case Type::ConstantMatrix:
3568
194
  case Type::Complex:
3569
194
  case Type::BlockPointer:
3570
    // Itanium C++ ABI 2.9.5p4:
3571
    // abi::__fundamental_type_info adds no data members to std::type_info.
3572
194
    break;
3573
3574
0
  case Type::LValueReference:
3575
0
  case Type::RValueReference:
3576
0
    llvm_unreachable("References shouldn't get here");
3577
3578
0
  case Type::Auto:
3579
0
  case Type::DeducedTemplateSpecialization:
3580
0
    llvm_unreachable("Undeduced type shouldn't get here");
3581
3582
0
  case Type::Pipe:
3583
0
    break;
3584
3585
6
  case Type::ExtInt:
3586
6
    break;
3587
3588
5
  case Type::ConstantArray:
3589
5
  case Type::IncompleteArray:
3590
5
  case Type::VariableArray:
3591
    // Itanium C++ ABI 2.9.5p5:
3592
    // abi::__array_type_info adds no data members to std::type_info.
3593
5
    break;
3594
3595
87
  case Type::FunctionNoProto:
3596
87
  case Type::FunctionProto:
3597
    // Itanium C++ ABI 2.9.5p5:
3598
    // abi::__function_type_info adds no data members to std::type_info.
3599
87
    break;
3600
3601
2
  case Type::Enum:
3602
    // Itanium C++ ABI 2.9.5p5:
3603
    // abi::__enum_type_info adds no data members to std::type_info.
3604
2
    break;
3605
3606
1.98k
  case Type::Record: {
3607
1.98k
    const CXXRecordDecl *RD =
3608
1.98k
      cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3609
1.98k
    if (!RD->hasDefinition() || !RD->getNumBases()) {
3610
      // We don't need to emit any fields.
3611
974
      break;
3612
974
    }
3613
3614
1.01k
    if (CanUseSingleInheritance(RD))
3615
580
      BuildSIClassTypeInfo(RD);
3616
431
    else
3617
431
      BuildVMIClassTypeInfo(RD);
3618
3619
1.01k
    break;
3620
1.01k
  }
3621
3622
9
  case Type::ObjCObject:
3623
9
  case Type::ObjCInterface:
3624
9
    BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
3625
9
    break;
3626
3627
8
  case Type::ObjCObjectPointer:
3628
8
    BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
3629
8
    break;
3630
3631
248
  case Type::Pointer:
3632
248
    BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
3633
248
    break;
3634
3635
21
  case Type::MemberPointer:
3636
21
    BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
3637
21
    break;
3638
3639
0
  case Type::Atomic:
3640
    // No fields, at least for the moment.
3641
0
    break;
3642
2.56k
  }
3643
3644
2.56k
  llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);
3645
3646
2.56k
  SmallString<256> Name;
3647
2.56k
  llvm::raw_svector_ostream Out(Name);
3648
2.56k
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3649
2.56k
  llvm::Module &M = CGM.getModule();
3650
2.56k
  llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
3651
2.56k
  llvm::GlobalVariable *GV =
3652
2.56k
      new llvm::GlobalVariable(M, Init->getType(),
3653
2.56k
                               /*isConstant=*/true, Linkage, Init, Name);
3654
3655
  // If there's already an old global variable, replace it with the new one.
3656
2.56k
  if (OldGV) {
3657
20
    GV->takeName(OldGV);
3658
20
    llvm::Constant *NewPtr =
3659
20
      llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
3660
20
    OldGV->replaceAllUsesWith(NewPtr);
3661
20
    OldGV->eraseFromParent();
3662
20
  }
3663
3664
2.56k
  if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
3665
651
    GV->setComdat(M.getOrInsertComdat(GV->getName()));
3666
3667
2.56k
  CharUnits Align =
3668
2.56k
      CGM.getContext().toCharUnitsFromBits(CGM.getTarget().getPointerAlign(0));
3669
2.56k
  GV->setAlignment(Align.getAsAlign());
3670
3671
  // The Itanium ABI specifies that type_info objects must be globally
3672
  // unique, with one exception: if the type is an incomplete class
3673
  // type or a (possibly indirect) pointer to one.  That exception
3674
  // affects the general case of comparing type_info objects produced
3675
  // by the typeid operator, which is why the comparison operators on
3676
  // std::type_info generally use the type_info name pointers instead
3677
  // of the object addresses.  However, the language's built-in uses
3678
  // of RTTI generally require class types to be complete, even when
3679
  // manipulating pointers to those class types.  This allows the
3680
  // implementation of dynamic_cast to rely on address equality tests,
3681
  // which is much faster.
3682
3683
  // All of this is to say that it's important that both the type_info
3684
  // object and the type_info name be uniqued when weakly emitted.
3685
3686
2.56k
  TypeName->setVisibility(Visibility);
3687
2.56k
  CGM.setDSOLocal(TypeName);
3688
3689
2.56k
  GV->setVisibility(Visibility);
3690
2.56k
  CGM.setDSOLocal(GV);
3691
3692
2.56k
  TypeName->setDLLStorageClass(DLLStorageClass);
3693
2.56k
  GV->setDLLStorageClass(DLLStorageClass);
3694
3695
2.56k
  TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
3696
2.56k
  GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);
3697
3698
2.56k
  return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
3699
2.56k
}
3700
3701
/// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
3702
/// for the given Objective-C object type.
3703
9
void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
3704
  // Drop qualifiers.
3705
9
  const Type *T = OT->getBaseType().getTypePtr();
3706
9
  assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
3707
3708
  // The builtin types are abi::__class_type_infos and don't require
3709
  // extra fields.
3710
9
  if (isa<BuiltinType>(T)) return;
3711
3712
3
  ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
3713
3
  ObjCInterfaceDecl *Super = Class->getSuperClass();
3714
3715
  // Root classes are also __class_type_info.
3716
3
  if (!Super) return;
3717
3718
1
  QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
3719
3720
  // Everything else is single inheritance.
3721
1
  llvm::Constant *BaseTypeInfo =
3722
1
      ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
3723
1
  Fields.push_back(BaseTypeInfo);
3724
1
}
3725
3726
/// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3727
/// inheritance, according to the Itanium C++ ABI, 2.95p6b.
3728
580
void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
3729
  // Itanium C++ ABI 2.9.5p6b:
3730
  // It adds to abi::__class_type_info a single member pointing to the
3731
  // type_info structure for the base type,
3732
580
  llvm::Constant *BaseTypeInfo =
3733
580
    ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
3734
580
  Fields.push_back(BaseTypeInfo);
3735
580
}
3736
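// Illustration (not part of ItaniumCXXABI.cpp): the object emitted by
// BuildSIClassTypeInfo above has roughly the shape below (a sketch modeled
// on Itanium C++ ABI 2.9.5p6b; field names echo the ABI text, and the vtable
// pointer a real std::type_info carries is shown explicitly).
namespace si_rtti_sketch {
struct ClassTypeInfoFields {
  const void *VTablePtr;   // selects __class_type_info behavior
  const char *MangledName; // the type_info name emitted alongside the object
};
struct SIClassTypeInfoFields {
  ClassTypeInfoFields Base;            // the __class_type_info part
  const ClassTypeInfoFields *BaseType; // __base_type: RTTI of the single base
};
} // namespace si_rtti_sketch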
3737
namespace {
3738
  /// SeenBases - Contains virtual and non-virtual bases seen when traversing
3739
  /// a class hierarchy.
3740
  struct SeenBases {
3741
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
3742
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
3743
  };
3744
}
3745
3746
/// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
3747
/// abi::__vmi_class_type_info.
3748
///
3749
static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
3750
1.56k
                                             SeenBases &Bases) {
3751
3752
1.56k
  unsigned Flags = 0;
3753
3754
1.56k
  auto *BaseDecl =
3755
1.56k
      cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3756
3757
1.56k
  if (Base->isVirtual()) {
3758
    // Mark the virtual base as seen.
3759
815
    if (!Bases.VirtualBases.insert(BaseDecl).second) {
3760
      // If this virtual base has been seen before, then the class is diamond
3761
      // shaped.
3762
409
      Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
3763
406
    } else {
3764
406
      if (Bases.NonVirtualBases.count(BaseDecl))
3765
19
        Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3766
406
    }
3767
751
  } else {
3768
    // Mark the non-virtual base as seen.
3769
751
    if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
3770
      // If this non-virtual base has been seen before, then the class has non-
3771
      // diamond shaped repeated inheritance.
3772
249
      Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3773
502
    } else {
3774
502
      if (Bases.VirtualBases.count(BaseDecl))
3775
28
        Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3776
502
    }
3777
751
  }
3778
3779
  // Walk all bases.
3780
1.56k
  for (const auto &I : BaseDecl->bases())
3781
886
    Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3782
3783
1.56k
  return Flags;
3784
1.56k
}
3785
3786
431
static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
3787
431
  unsigned Flags = 0;
3788
431
  SeenBases Bases;
3789
3790
  // Walk all bases.
3791
431
  for (const auto &I : RD->bases())
3792
680
    Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3793
3794
431
  return Flags;
3795
431
}
3796
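// Illustration (not part of ItaniumCXXABI.cpp): hierarchies that exercise the
// two flag bits computed above.  The expected flags are inferred from the
// logic of ComputeVMIClassTypeInfoFlags and stated as a sketch, not captured
// compiler output.
namespace vmi_flags_sketch {
struct A {};
struct B : virtual A {};
struct C : virtual A {};
// D reaches the virtual base A through both B and C, so the second insertion
// into VirtualBases fails -> VMI_DiamondShaped.
struct D : B, C {};

struct X {};
struct Y : X {};
struct Z : X {};
// W reaches the non-virtual base X twice, so the second insertion into
// NonVirtualBases fails -> VMI_NonDiamondRepeat.
struct W : Y, Z {};
} // namespace vmi_flags_sketch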
3797
/// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
3798
/// classes with bases that do not satisfy the abi::__si_class_type_info
3799
/// constraints, according to the Itanium C++ ABI, 2.9.5p6c.
3800
431
void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
3801
431
  llvm::Type *UnsignedIntLTy =
3802
431
    CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3803
3804
  // Itanium C++ ABI 2.9.5p6c:
3805
  //   __flags is a word with flags describing details about the class
3806
  //   structure, which may be referenced by using the __flags_masks
3807
  //   enumeration. These flags refer to both direct and indirect bases.
3808
431
  unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
3809
431
  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3810
3811
  // Itanium C++ ABI 2.9.5p6c:
3812
  //   __base_count is a word with the number of direct proper base class
3813
  //   descriptions that follow.
3814
431
  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
3815
3816
431
  if (!RD->getNumBases())
3817
0
    return;
3818
3819
  // Now add the base class descriptions.
3820
3821
  // Itanium C++ ABI 2.9.5p6c:
3822
  //   __base_info[] is an array of base class descriptions -- one for every
3823
  //   direct proper base. Each description is of the type:
3824
  //
3825
  //   struct abi::__base_class_type_info {
3826
  //   public:
3827
  //     const __class_type_info *__base_type;
3828
  //     long __offset_flags;
3829
  //
3830
  //     enum __offset_flags_masks {
3831
  //       __virtual_mask = 0x1,
3832
  //       __public_mask = 0x2,
3833
  //       __offset_shift = 8
3834
  //     };
3835
  //   };
3836
3837
  // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
3838
  // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
3839
  // LLP64 platforms.
3840
  // FIXME: Consider updating libc++abi to match, and extend this logic to all
3841
  // LLP64 platforms.
3842
431
  QualType OffsetFlagsTy = CGM.getContext().LongTy;
3843
431
  const TargetInfo &TI = CGM.getContext().getTargetInfo();
3844
431
  if (TI.getTriple().isOSCygMing() && TI.getPointerWidth(0) > TI.getLongWidth())
3845
6
    OffsetFlagsTy = CGM.getContext().LongLongTy;
3846
431
  llvm::Type *OffsetFlagsLTy =
3847
431
      CGM.getTypes().ConvertType(OffsetFlagsTy);
3848
3849
680
  for (const auto &Base : RD->bases()) {
3850
    // The __base_type member points to the RTTI for the base type.
3851
680
    Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
3852
3853
680
    auto *BaseDecl =
3854
680
        cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
3855
3856
680
    int64_t OffsetFlags = 0;
3857
3858
    // All but the lower 8 bits of __offset_flags are a signed offset.
3859
    // For a non-virtual base, this is the offset in the object of the base
3860
    // subobject. For a virtual base, this is the offset in the virtual table of
3861
    // the virtual base offset for the virtual base referenced (negative).
3862
680
    CharUnits Offset;
3863
680
    if (Base.isVirtual())
3864
291
      Offset =
3865
291
        CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
3866
389
    else {
3867
389
      const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
3868
389
      Offset = Layout.getBaseClassOffset(BaseDecl);
3869
389
    };
3870
3871
680
    OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
3872
3873
    // The low-order byte of __offset_flags contains flags, as given by the
3874
    // masks from the enumeration __offset_flags_masks.
3875
680
    if (Base.isVirtual())
3876
291
      OffsetFlags |= BCTI_Virtual;
3877
680
    if (Base.getAccessSpecifier() == AS_public)
3878
619
      OffsetFlags |= BCTI_Public;
3879
3880
680
    Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
3881
680
  }
3882
431
}
3883
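// Illustration (not part of ItaniumCXXABI.cpp): a helper mirroring how the
// loop above packs each base class description's __offset_flags word -- the
// signed offset shifted left by __offset_shift, with the virtual/public bits
// in the low byte.  The constants repeat the ABI excerpt quoted above; the
// helper itself is only a readability sketch.
#include <cstdint>
namespace offset_flags_sketch {
enum : std::int64_t { VirtualMask = 0x1, PublicMask = 0x2, OffsetShift = 8 };

inline std::int64_t makeOffsetFlags(std::int64_t Offset, bool IsVirtual,
                                    bool IsPublic) {
  std::int64_t Word = Offset << OffsetShift;
  if (IsVirtual)
    Word |= VirtualMask;
  if (IsPublic)
    Word |= PublicMask;
  return Word;
}
// Example: a public, non-virtual base at offset 16 encodes as
// (16 << 8) | 2 == 4098.
} // namespace offset_flags_sketch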
3884
/// Compute the flags for a __pbase_type_info, and remove the corresponding
3885
/// pieces from \p Type.
3886
277
static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
3887
277
  unsigned Flags = 0;
3888
3889
277
  if (Type.isConstQualified())
3890
104
    Flags |= ItaniumRTTIBuilder::PTI_Const;
3891
277
  if (Type.isVolatileQualified())
3892
4
    Flags |= ItaniumRTTIBuilder::PTI_Volatile;
3893
277
  if (Type.isRestrictQualified())
3894
0
    Flags |= ItaniumRTTIBuilder::PTI_Restrict;
3895
277
  Type = Type.getUnqualifiedType();
3896
3897
  // Itanium C++ ABI 2.9.5p7:
3898
  //   When the abi::__pbase_type_info is for a direct or indirect pointer to an
3899
  //   incomplete class type, the incomplete target type flag is set.
3900
277
  if (ContainsIncompleteClassType(Type))
3901
19
    Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
3902
3903
277
  if (auto *Proto = Type->getAs<FunctionProtoType>()) {
3904
19
    if (Proto->isNothrow()) {
3905
2
      Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
3906
2
      Type = Ctx.getFunctionTypeWithExceptionSpec(Type, EST_None);
3907
2
    }
3908
19
  }
3909
3910
277
  return Flags;
3911
277
}
3912
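// Illustration (not part of ItaniumCXXABI.cpp): how the bits produced by
// extractPBaseFlags combine for a concrete case.  The mask values follow the
// Itanium ABI's __pbase_type_info flag masks and mirror the PTI_*
// enumerators used above, but they are restated here only as a sketch.
namespace pbase_flags_sketch {
enum : unsigned {
  ConstMask = 0x1,
  VolatileMask = 0x2,
  RestrictMask = 0x4,
  IncompleteMask = 0x8,
};
// For a pointer type such as "const volatile int *", the pointee is const and
// volatile, so the flag word is 0x1 | 0x2 == 0x3.
constexpr unsigned ExampleFlags = ConstMask | VolatileMask;
static_assert(ExampleFlags == 0x3, "const + volatile pointee");
} // namespace pbase_flags_sketch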
3913
/// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
3914
/// used for pointer types.
3915
256
void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
3916
  // Itanium C++ ABI 2.9.5p7:
3917
  //   __flags is a flag word describing the cv-qualification and other
3918
  //   attributes of the type pointed to
3919
256
  unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
3920
3921
256
  llvm::Type *UnsignedIntLTy =
3922
256
    CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3923
256
  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3924
3925
  // Itanium C++ ABI 2.9.5p7:
3926
  //  __pointee is a pointer to the std::type_info derivation for the
3927
  //  unqualified type being pointed to.
3928
256
  llvm::Constant *PointeeTypeInfo =
3929
256
      ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
3930
256
  Fields.push_back(PointeeTypeInfo);
3931
256
}
3932
3933
/// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
3934
/// struct, used for member pointer types.
3935
void
3936
21
ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
3937
21
  QualType PointeeTy = Ty->getPointeeType();
3938
3939
  // Itanium C++ ABI 2.9.5p7:
3940
  //   __flags is a flag word describing the cv-qualification and other
3941
  //   attributes of the type pointed to.
3942
21
  unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
3943
3944
21
  const RecordType *ClassType = cast<RecordType>(Ty->getClass());
3945
21
  if (IsIncompleteClassType(ClassType))
3946
8
    Flags |= PTI_ContainingClassIncomplete;
3947
3948
21
  llvm::Type *UnsignedIntLTy =
3949
21
    CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3950
21
  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3951
3952
  // Itanium C++ ABI 2.9.5p7:
3953
  //   __pointee is a pointer to the std::type_info derivation for the
3954
  //   unqualified type being pointed to.
3955
21
  llvm::Constant *PointeeTypeInfo =
3956
21
      ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
3957
21
  Fields.push_back(PointeeTypeInfo);
3958
3959
  // Itanium C++ ABI 2.9.5p9:
3960
  //   __context is a pointer to an abi::__class_type_info corresponding to the
3961
  //   class type containing the member pointed to
3962
  //   (e.g., the "A" in "int A::*").
3963
21
  Fields.push_back(
3964
21
      ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
3965
21
}
3966
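// Illustration (not part of ItaniumCXXABI.cpp): the three fields pushed by
// BuildPointerToMemberTypeInfo above give the emitted object roughly this
// shape (a sketch following Itanium C++ ABI 2.9.5p7 and p9; the leading
// std::type_info vptr/name fields are collapsed into one placeholder member).
namespace memptr_rtti_sketch {
struct PointerToMemberTypeInfoFields {
  const void *TypeInfoHeader; // stands in for the type_info vptr/name pair
  unsigned Flags;             // cv-qualifiers and incompleteness bits
  const void *Pointee;        // type_info of the unqualified pointee type
  const void *Context;        // __class_type_info of the containing class
};
// For "int A::*": Pointee describes "int" and Context describes "A".
} // namespace memptr_rtti_sketch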
3967
3.19k
llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
3968
3.19k
  return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
3969
3.19k
}
3970
3971
4
void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
3972
  // Types added here must also be added to TypeInfoIsInStandardLibrary.
3973
4
  QualType FundamentalTypes[] = {
3974
4
      getContext().VoidTy,             getContext().NullPtrTy,
3975
4
      getContext().BoolTy,             getContext().WCharTy,
3976
4
      getContext().CharTy,             getContext().UnsignedCharTy,
3977
4
      getContext().SignedCharTy,       getContext().ShortTy,
3978
4
      getContext().UnsignedShortTy,    getContext().IntTy,
3979
4
      getContext().UnsignedIntTy,      getContext().LongTy,
3980
4
      getContext().UnsignedLongTy,     getContext().LongLongTy,
3981
4
      getContext().UnsignedLongLongTy, getContext().Int128Ty,
3982
4
      getContext().UnsignedInt128Ty,   getContext().HalfTy,
3983
4
      getContext().FloatTy,            getContext().DoubleTy,
3984
4
      getContext().LongDoubleTy,       getContext().Float128Ty,
3985
4
      getContext().Char8Ty,            getContext().Char16Ty,
3986
4
      getContext().Char32Ty
3987
4
  };
3988
4
  llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
3989
4
      RD->hasAttr<DLLExportAttr>()
3990
2
      ? llvm::GlobalValue::DLLExportStorageClass
3991
2
      : llvm::GlobalValue::DefaultStorageClass;
3992
4
  llvm::GlobalValue::VisibilityTypes Visibility =
3993
4
      CodeGenModule::GetLLVMVisibility(RD->getVisibility());
3994
100
  for (const QualType &FundamentalType : FundamentalTypes) {
3995
100
    QualType PointerType = getContext().getPointerType(FundamentalType);
3996
100
    QualType PointerTypeConst = getContext().getPointerType(
3997
100
        FundamentalType.withConst());
3998
100
    for (QualType Type : {FundamentalType, PointerType, PointerTypeConst})
3999
300
      ItaniumRTTIBuilder(*this).BuildTypeInfo(
4000
300
          Type, llvm::GlobalValue::ExternalLinkage,
4001
300
          Visibility, DLLStorageClass);
4002
100
  }
4003
4
}
4004
4005
/// What sort of uniqueness rules should we use for the RTTI for the
4006
/// given type?
4007
ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
4008
4.66k
    QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
4009
4.66k
  if (shouldRTTIBeUnique())
4010
4.62k
    return RUK_Unique;
4011
4012
  // It's only necessary for linkonce_odr or weak_odr linkage.
4013
40
  if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
4014
20
      Linkage != llvm::GlobalValue::WeakODRLinkage)
4015
16
    return RUK_Unique;
4016
4017
  // It's only necessary with default visibility.
4018
24
  if (CanTy->getVisibility() != DefaultVisibility)
4019
8
    return RUK_Unique;
4020
4021
  // If we're not required to publish this symbol, hide it.
4022
16
  if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
4023
12
    return RUK_NonUniqueHidden;
4024
4025
  // If we're required to publish this symbol, as we might be under an
4026
  // explicit instantiation, leave it with default visibility but
4027
  // enable string-comparisons.
4028
4
  assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
4029
4
  return RUK_NonUniqueVisible;
4030
4
}
4031
4032
// Find out how to codegen the complete destructor and constructor
4033
namespace {
4034
enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
4035
}
4036
static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
4037
52.5k
                                       const CXXMethodDecl *MD) {
4038
52.5k
  if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
4039
52.0k
    return StructorCodegen::Emit;
4040
4041
  // The complete and base structors are not equivalent if there are any virtual
4042
  // bases, so emit separate functions.
4043
538
  if (MD->getParent()->getNumVBases())
4044
33
    return StructorCodegen::Emit;
4045
4046
505
  GlobalDecl AliasDecl;
4047
505
  if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
4048
352
    AliasDecl = GlobalDecl(DD, Dtor_Complete);
4049
153
  } else {
4050
153
    const auto *CD = cast<CXXConstructorDecl>(MD);
4051
153
    AliasDecl = GlobalDecl(CD, Ctor_Complete);
4052
153
  }
4053
505
  llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4054
4055
505
  if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
4056
339
    return StructorCodegen::RAUW;
4057
4058
  // FIXME: Should we allow available_externally aliases?
4059
166
  if (!llvm::GlobalAlias::isValidLinkage(Linkage))
4060
0
    return StructorCodegen::RAUW;
4061
4062
166
  if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
4063
    // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
4064
33
    if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
4065
11
        CGM.getTarget().getTriple().isOSBinFormatWasm())
4066
22
      return StructorCodegen::COMDAT;
4067
11
    return StructorCodegen::Emit;
4068
11
  }
4069
4070
133
  return StructorCodegen::Alias;
4071
133
}
4072
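// Illustration (not part of ItaniumCXXABI.cpp): the situation getCodegenToUse
// classifies.  For a class with no virtual bases the complete-object and
// base-object structors have identical bodies, so with -mconstructor-aliases
// the complete-object symbol can be emitted as an alias of (or replaced by)
// the base-object symbol instead of being emitted twice.  For a class S at
// global scope the Itanium scheme mangles these as _ZN1SC1Ev / _ZN1SC2Ev for
// the constructor and _ZN1SD1Ev / _ZN1SD2Ev for the destructor; the class
// below only sketches the shape of such a candidate.
struct StructorAliasSketch {
  StructorAliasSketch();   // complete- and base-object ctors are identical
  ~StructorAliasSketch();  // likewise for the destructor
};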
4073
static void emitConstructorDestructorAlias(CodeGenModule &CGM,
4074
                                           GlobalDecl AliasDecl,
4075
74
                                           GlobalDecl TargetDecl) {
4076
74
  llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4077
4078
74
  StringRef MangledName = CGM.getMangledName(AliasDecl);
4079
74
  llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
4080
74
  if (Entry && !Entry->isDeclaration())
4081
0
    return;
4082
4083
74
  auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));
4084
4085
  // Create the alias with no name.
4086
74
  auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);
4087
4088
  // Constructors and destructors are always unnamed_addr.
4089
74
  Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
4090
4091
  // Switch any previous uses to the alias.
4092
74
  if (Entry) {
4093
2
    assert(Entry->getType() == Aliasee->getType() &&
4094
2
           "declaration exists with different type");
4095
2
    Alias->takeName(Entry);
4096
2
    Entry->replaceAllUsesWith(Alias);
4097
2
    Entry->eraseFromParent();
4098
72
  } else {
4099
72
    Alias->setName(MangledName);
4100
72
  }
4101
4102
  // Finally, set up the alias with its proper name and attributes.
4103
74
  CGM.SetCommonAttributes(AliasDecl, Alias);
4104
74
}
4105
4106
52.5k
void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) {
4107
52.5k
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());
4108
52.5k
  auto *CD = dyn_cast<CXXConstructorDecl>(MD);
4109
36.7k
  const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);
4110
4111
52.5k
  StructorCodegen CGType = getCodegenToUse(CGM, MD);
4112
4113
52.5k
  if (CD ? GD.getCtorType() == Ctor_Complete
4114
23.1k
         : GD.getDtorType() == Dtor_Complete) {
4115
23.1k
    GlobalDecl BaseDecl;
4116
23.1k
    if (CD)
4117
15.8k
      BaseDecl = GD.getWithCtorType(Ctor_Base);
4118
7.30k
    else
4119
7.30k
      BaseDecl = GD.getWithDtorType(Dtor_Base);
4120
4121
23.1k
    if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
4122
74
      emitConstructorDestructorAlias(CGM, GD, BaseDecl);
4123
74
      return;
4124
74
    }
4125
4126
23.0k
    if (CGType == StructorCodegen::RAUW) {
4127
141
      StringRef MangledName = CGM.getMangledName(GD);
4128
141
      auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
4129
141
      CGM.addReplacement(MangledName, Aliasee);
4130
141
      return;
4131
141
    }
4132
52.3k
  }
4133
4134
  // The base destructor is equivalent to the base destructor of its
4135
  // base class if there is exactly one non-virtual base class with a
4136
  // non-trivial destructor, there are no fields with a non-trivial
4137
  // destructor, and the body of the destructor is trivial.
4138
52.3k
  if (DD && GD.getDtorType() == Dtor_Base &&
4139
7.80k
      CGType != StructorCodegen::COMDAT &&
4140
7.79k
      !CGM.TryEmitBaseDestructorAsAlias(DD))
4141
35
    return;
4142
4143
  // FIXME: The deleting destructor is equivalent to the selected operator
4144
  // delete if:
4145
  //  * either the delete is a destroying operator delete or the destructor
4146
  //    would be trivial if it weren't virtual,
4147
  //  * the conversion from the 'this' parameter to the first parameter of the
4148
  //    destructor is equivalent to a bitcast,
4149
  //  * the destructor does not have an implicit "this" return, and
4150
  //  * the operator delete has the same calling convention and IR function type
4151
  //    as the destructor.
4152
  // In such cases we should try to emit the deleting dtor as an alias to the
4153
  // selected 'operator delete'.
4154
4155
52.3k
  llvm::Function *Fn = CGM.codegenCXXStructor(GD);
4156
4157
52.3k
  if (CGType == StructorCodegen::COMDAT) {
4158
12
    SmallString<256> Buffer;
4159
12
    llvm::raw_svector_ostream Out(Buffer);
4160
12
    if (DD)
4161
10
      getMangleContext().mangleCXXDtorComdat(DD, Out);
4162
2
    else
4163
2
      getMangleContext().mangleCXXCtorComdat(CD, Out);
4164
12
    llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
4165
12
    Fn->setComdat(C);
4166
52.2k
  } else {
4167
52.2k
    CGM.maybeSetTrivialComdat(*MD, *Fn);
4168
52.2k
  }
4169
52.3k
}
4170
4171
985
static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) {
4172
  // void *__cxa_begin_catch(void*);
4173
985
  llvm::FunctionType *FTy = llvm::FunctionType::get(
4174
985
      CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4175
4176
985
  return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
4177
985
}
4178
4179
476
static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) {
4180
  // void __cxa_end_catch();
4181
476
  llvm::FunctionType *FTy =
4182
476
      llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
4183
4184
476
  return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
4185
476
}
4186
4187
6
static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) {
4188
  // void *__cxa_get_exception_ptr(void*);
4189
6
  llvm::FunctionType *FTy = llvm::FunctionType::get(
4190
6
      CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4191
4192
6
  return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
4193
6
}
4194
4195
namespace {
4196
  /// A cleanup to call __cxa_end_catch.  In many cases, the caught
4197
  /// exception type lets us state definitively that the thrown exception
4198
  /// type does not have a destructor.  In particular:
4199
  ///   - Catch-alls tell us nothing, so we have to conservatively
4200
  ///     assume that the thrown exception might have a destructor.
4201
  ///   - Catches by reference behave according to their base types.
4202
  ///   - Catches of non-record types will only trigger for exceptions
4203
  ///     of non-record types, which never have destructors.
4204
  ///   - Catches of record types can trigger for arbitrary subclasses
4205
  ///     of the caught type, so we have to assume the actual thrown
4206
  ///     exception type might have a throwing destructor, even if the
4207
  ///     caught type's destructor is trivial or nothrow.
4208
  struct CallEndCatch final : EHScopeStack::Cleanup {
4209
341
    CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
4210
    bool MightThrow;
4211
4212
476
    void Emit(CodeGenFunction &CGF, Flags flags) override {
4213
476
      if (!MightThrow) {
4214
187
        CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
4215
187
        return;
4216
187
      }
4217
4218
289
      CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
4219
289
    }
4220
  };
4221
}
4222
4223
/// Emits a call to __cxa_begin_catch and enters a cleanup to call
4224
/// __cxa_end_catch.
4225
///
4226
/// \param EndMightThrow - true if __cxa_end_catch might throw
4227
static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
4228
                                   llvm::Value *Exn,
4229
341
                                   bool EndMightThrow) {
4230
341
  llvm::CallInst *call =
4231
341
    CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);
4232
4233
341
  CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
4234
4235
341
  return call;
4236
341
}
4237
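// Illustration (not part of ItaniumCXXABI.cpp): roughly what CallBeginCatch
// and the CallEndCatch cleanup arrange for a simple scalar handler such as
// "catch (int i)", written out as C++ for readability.  The extern "C"
// declarations are assumptions modeled on the Itanium EH ABI; the real code
// is emitted as IR with the end-catch call in a cleanup, not straight-line.
extern "C" void *__cxa_begin_catch(void *ExceptionObject);
extern "C" void __cxa_end_catch();

namespace catch_lowering_sketch {
inline void handler(void *Exn) {
  // __cxa_begin_catch returns a pointer into the exception object; for a
  // non-pointer catch type the parameter is loaded through that pointer.
  int I = *static_cast<int *>(__cxa_begin_catch(Exn));
  (void)I; // ... handler body runs here ...
  // The cleanup pushed by CallBeginCatch runs __cxa_end_catch on every exit.
  __cxa_end_catch();
}
} // namespace catch_lowering_sketch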
4238
/// A "special initializer" callback for initializing a catch
4239
/// parameter during catch initialization.
4240
static void InitCatchParam(CodeGenFunction &CGF,
4241
                           const VarDecl &CatchParam,
4242
                           Address ParamAddr,
4243
154
                           SourceLocation Loc) {
4244
  // Load the exception from where the landing pad saved it.
4245
154
  llvm::Value *Exn = CGF.getExceptionFromSlot();
4246
4247
154
  CanQualType CatchType =
4248
154
    CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
4249
154
  llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
4250
4251
  // If we're catching by reference, we can just cast the object
4252
  // pointer to the appropriate pointer.
4253
154
  if (isa<ReferenceType>(CatchType)) {
4254
36
    QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
4255
36
    bool EndCatchMightThrow = CaughtType->isRecordType();
4256
4257
    // __cxa_begin_catch returns the adjusted object pointer.
4258
36
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);
4259
4260
    // We have no way to tell the personality function that we're
4261
    // catching by reference, so if we're catching a pointer,
4262
    // __cxa_begin_catch will actually return that pointer by value.
4263
36
    if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
4264
9
      QualType PointeeType = PT->getPointeeType();
4265
4266
      // When catching by reference, generally we should just ignore
4267
      // this by-value pointer and use the exception object instead.
4268
9
      if (!PointeeType->isRecordType()) {
4269
4270
        // Exn points to the struct _Unwind_Exception header, which
4271
        // we have to skip past in order to reach the exception data.
4272
7
        unsigned HeaderSize =
4273
7
          CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
4274
7
        AdjustedExn = CGF.Builder.CreateConstGEP1_32(Exn, HeaderSize);
4275
4276
      // However, if we're catching a pointer-to-record type that won't
4277
      // work, because the personality function might have adjusted
4278
      // the pointer.  There's actually no way for us to fully satisfy
4279
      // the language/ABI contract here:  we can't use Exn because it
4280
      // might have the wrong adjustment, but we can't use the by-value
4281
      // pointer because it's off by a level of abstraction.
4282
      //
4283
      // The current solution is to dump the adjusted pointer into an
4284
      // alloca, which breaks language semantics (because changing the
4285
      // pointer doesn't change the exception) but at least works.
4286
      // The better solution would be to filter out non-exact matches
4287
      // and rethrow them, but this is tricky because the rethrow
4288
      // really needs to be catchable by other sites at this landing
4289
      // pad.  The best solution is to fix the personality function.
4290
2
      } else {
4291
        // Pull the pointer for the reference type off.
4292
2
        llvm::Type *PtrTy =
4293
2
          cast<llvm::PointerType>(LLVMCatchTy)->getElementType();
4294
4295
        // Create the temporary and write the adjusted pointer into it.
4296
2
        Address ExnPtrTmp =
4297
2
          CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
4298
2
        llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
4299
2
        CGF.Builder.CreateStore(Casted, ExnPtrTmp);
4300
4301
        // Bind the reference to the temporary.
4302
2
        AdjustedExn = ExnPtrTmp.getPointer();
4303
2
      }
4304
9
    }
4305
4306
36
    llvm::Value *ExnCast =
4307
36
      CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
4308
36
    CGF.Builder.CreateStore(ExnCast, ParamAddr);
4309
36
    return;
4310
36
  }
4311
4312
  // Scalars and complexes.
4313
118
  TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
4314
118
  if (TEK != TEK_Aggregate) {
4315
109
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);
4316
4317
    // If the catch type is a pointer type, __cxa_begin_catch returns
4318
    // the pointer by value.
4319
109
    if (CatchType->hasPointerRepresentation()) {
4320
8
      llvm::Value *CastExn =
4321
8
        CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");
4322
4323
8
      switch (CatchType.getQualifiers().getObjCLifetime()) {
4324
1
      case Qualifiers::OCL_Strong:
4325
1
        CastExn = CGF.EmitARCRetainNonBlock(CastExn);
4326
1
        LLVM_FALLTHROUGH;
4327
4328
7
      case Qualifiers::OCL_None:
4329
7
      case Qualifiers::OCL_ExplicitNone:
4330
7
      case Qualifiers::OCL_Autoreleasing:
4331
7
        CGF.Builder.CreateStore(CastExn, ParamAddr);
4332
7
        return;
4333
4334
1
      case Qualifiers::OCL_Weak:
4335
1
        CGF.EmitARCInitWeak(ParamAddr, CastExn);
4336
1
        return;
4337
0
      }
4338
0
      llvm_unreachable("bad ownership qualifier!");
4339
0
    }
4340
4341
    // Otherwise, it returns a pointer into the exception object.
4342
4343
101
    llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
4344
101
    llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
4345
4346
101
    LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
4347
101
    LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
4348
101
    switch (TEK) {
4349
0
    case TEK_Complex:
4350
0
      CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
4351
0
                             /*init*/ true);
4352
0
      return;
4353
101
    case TEK_Scalar: {
4354
101
      llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
4355
101
      CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
4356
101
      return;
4357
0
    }
4358
0
    case TEK_Aggregate:
4359
0
      llvm_unreachable("evaluation kind filtered out!");
4360
0
    }
4361
0
    llvm_unreachable("bad evaluation kind");
4362
0
  }
4363
4364
9
  assert(isa<RecordType>(CatchType) && "unexpected catch type!");
4365
9
  auto catchRD = CatchType->getAsCXXRecordDecl();
4366
9
  CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
4367
4368
9
  llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
4369
4370
  // Check for a copy expression.  If we don't have a copy expression,
4371
  // that means a trivial copy is okay.
4372
9
  const Expr *copyExpr = CatchParam.getInit();
4373
9
  if (!copyExpr) {
4374
3
    llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
4375
3
    Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
4376
3
                        caughtExnAlignment);
4377
3
    LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType);
4378
3
    LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType);
4379
3
    CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap);
4380
3
    return;
4381
3
  }
4382
4383
  // We have to call __cxa_get_exception_ptr to get the adjusted
4384
  // pointer before copying.
4385
6
  llvm::CallInst *rawAdjustedExn =
4386
6
    CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);
4387
4388
  // Cast that to the appropriate type.
4389
6
  Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
4390
6
                      caughtExnAlignment);
4391
4392
  // The copy expression is defined in terms of an OpaqueValueExpr.
4393
  // Find it and map it to the adjusted expression.
4394
6
  CodeGenFunction::OpaqueValueMapping
4395
6
    opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
4396
6
           CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));
4397
4398
  // Call the copy ctor in a terminate scope.
4399
6
  CGF.EHStack.pushTerminate();
4400
4401
  // Perform the copy construction.
4402
6
  CGF.EmitAggExpr(copyExpr,
4403
6
                  AggValueSlot::forAddr(ParamAddr, Qualifiers(),
4404
6
                                        AggValueSlot::IsNotDestructed,
4405
6
                                        AggValueSlot::DoesNotNeedGCBarriers,
4406
6
                                        AggValueSlot::IsNotAliased,
4407
6
                                        AggValueSlot::DoesNotOverlap));
4408
4409
  // Leave the terminate scope.
4410
6
  CGF.EHStack.popTerminate();
4411
4412
  // Undo the opaque value mapping.
4413
6
  opaque.pop();
4414
4415
  // Finally we can call __cxa_begin_catch.
4416
6
  CallBeginCatch(CGF, Exn, true);
4417
6
}
4418
4419
/// Begins a catch statement by initializing the catch variable and
4420
/// calling __cxa_begin_catch.
4421
void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4422
341
                                   const CXXCatchStmt *S) {
4423
  // We have to be very careful with the ordering of cleanups here:
4424
  //   C++ [except.throw]p4:
4425
  //     The destruction [of the exception temporary] occurs
4426
  //     immediately after the destruction of the object declared in
4427
  //     the exception-declaration in the handler.
4428
  //
4429
  // So the precise ordering is:
4430
  //   1.  Construct catch variable.
4431
  //   2.  __cxa_begin_catch
4432
  //   3.  Enter __cxa_end_catch cleanup
4433
  //   4.  Enter dtor cleanup
4434
  //
4435
  // We do this by using a slightly abnormal initialization process.
4436
  // Delegation sequence:
4437
  //   - ExitCXXTryStmt opens a RunCleanupsScope
4438
  //     - EmitAutoVarAlloca creates the variable and debug info
4439
  //       - InitCatchParam initializes the variable from the exception
4440
  //       - CallBeginCatch calls __cxa_begin_catch
4441
  //       - CallBeginCatch enters the __cxa_end_catch cleanup
4442
  //     - EmitAutoVarCleanups enters the variable destructor cleanup
4443
  //   - EmitCXXTryStmt emits the code for the catch body
4444
  //   - EmitCXXTryStmt close the RunCleanupsScope
4445
4446
341
  VarDecl *CatchParam = S->getExceptionDecl();
4447
341
  if (!CatchParam) {
4448
187
    llvm::Value *Exn = CGF.getExceptionFromSlot();
4449
187
    CallBeginCatch(CGF, Exn, true);
4450
187
    return;
4451
187
  }
4452
4453
  // Emit the local.
4454
154
  CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
4455
154
  InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc());
4456
154
  CGF.EmitAutoVarCleanups(var);
4457
154
}
4458
4459
/// Get or define the following function:
4460
///   void @__clang_call_terminate(i8* %exn) nounwind noreturn
4461
/// This code is used only in C++.
4462
4.03k
static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) {
4463
4.03k
  llvm::FunctionType *fnTy =
4464
4.03k
    llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4465
4.03k
  llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction(
4466
4.03k
      fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);
4467
4.03k
  llvm::Function *fn =
4468
4.03k
      cast<llvm::Function>(fnRef.getCallee()->stripPointerCasts());
4469
4.03k
  if (fn->empty()) {
4470
644
    fn->setDoesNotThrow();
4471
644
    fn->setDoesNotReturn();
4472
4473
    // What we really want is to massively penalize inlining without
4474
    // forbidding it completely.  The difference between that and
4475
    // 'noinline' is negligible.
4476
644
    fn->addFnAttr(llvm::Attribute::NoInline);
4477
4478
    // Allow this function to be shared across translation units, but
4479
    // we don't want it to turn into an exported symbol.
4480
644
    fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
4481
644
    fn->setVisibility(llvm::Function::HiddenVisibility);
4482
644
    if (CGM.supportsCOMDAT())
4483
135
      fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));
4484
4485
    // Set up the function.
4486
644
    llvm::BasicBlock *entry =
4487
644
        llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
4488
644
    CGBuilderTy builder(CGM, entry);
4489
4490
    // Pull the exception pointer out of the parameter list.
4491
644
    llvm::Value *exn = &*fn->arg_begin();
4492
4493
    // Call __cxa_begin_catch(exn).
4494
644
    llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
4495
644
    catchCall->setDoesNotThrow();
4496
644
    catchCall->setCallingConv(CGM.getRuntimeCC());
4497
4498
    // Call std::terminate().
4499
644
    llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
4500
644
    termCall->setDoesNotThrow();
4501
644
    termCall->setDoesNotReturn();
4502
644
    termCall->setCallingConv(CGM.getRuntimeCC());
4503
4504
    // std::terminate cannot return.
4505
644
    builder.CreateUnreachable();
4506
644
  }
4507
4.03k
  return fnRef;
4508
4.03k
}
4509
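// Illustration (not part of ItaniumCXXABI.cpp): the helper built by
// getClangCallTerminateFn behaves roughly like the function below, written
// as C++ for readability.  The __cxa_begin_catch declaration is an
// assumption modeled on the Itanium EH ABI.
#include <exception>
extern "C" void *__cxa_begin_catch(void *ExceptionObject);

namespace call_terminate_sketch {
[[noreturn]] inline void clangCallTerminate(void *Exn) {
  // Formally catch the in-flight exception before terminating ...
  __cxa_begin_catch(Exn);
  // ... then terminate; this call never returns.
  std::terminate();
}
} // namespace call_terminate_sketch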
4510
llvm::CallInst *
4511
ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4512
4.04k
                                                   llvm::Value *Exn) {
4513
  // In C++, we want to call __cxa_begin_catch() before terminating.
4514
4.04k
  if (Exn) {
4515
4.03k
    assert(CGF.CGM.getLangOpts().CPlusPlus);
4516
4.03k
    return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
4517
4.03k
  }
4518
3
  return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
4519
3
}
4520
4521
std::pair<llvm::Value *, const CXXRecordDecl *>
4522
ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
4523
27
                             const CXXRecordDecl *RD) {
4524
27
  return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
4525
27
}
4526
4527
void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4528
98
                                       const CXXCatchStmt *C) {
4529
98
  if (CGF.getTarget().hasFeature("exception-handling"))
4530
84
    CGF.EHStack.pushCleanup<CatchRetScope>(
4531
84
        NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad));
4532
98
  ItaniumCXXABI::emitBeginCatch(CGF, C);
4533
98
}
4534
4535
/// Register a global destructor as best as we know how.
4536
void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
4537
                                  llvm::FunctionCallee dtor,
4538
22
                                  llvm::Constant *addr) {
4539
22
  if (D.getTLSKind() != VarDecl::TLS_None)
4540
0
    llvm::report_fatal_error("thread local storage not yet implemented on AIX");
4541
4542
  // Create __dtor function for the var decl.
4543
22
  llvm::Function *dtorStub = CGF.createAtExitStub(D, dtor, addr);
4544
4545
  // Register above __dtor with atexit().
4546
22
  CGF.registerGlobalDtorWithAtExit(dtorStub);
4547
4548
  // Emit __finalize function to unregister __dtor and (as appropriate) call
4549
  // __dtor.
4550
22
  emitCXXStermFinalizer(D, dtorStub, addr);
4551
22
}
4552
4553
void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
4554
22
                                     llvm::Constant *addr) {
4555
22
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
4556
22
  SmallString<256> FnName;
4557
22
  {
4558
22
    llvm::raw_svector_ostream Out(FnName);
4559
22
    getMangleContext().mangleDynamicStermFinalizer(&D, Out);
4560
22
  }
4561
4562
  // Create the finalization action associated with a variable.
4563
22
  const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
4564
22
  llvm::Function *StermFinalizer = CGM.CreateGlobalInitOrCleanUpFunction(
4565
22
      FTy, FnName.str(), FI, D.getLocation());
4566
4567
22
  CodeGenFunction CGF(CGM);
4568
4569
22
  CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, StermFinalizer, FI,
4570
22
                    FunctionArgList(), D.getLocation(),
4571
22
                    D.getInit()->getExprLoc());
4572
4573
  // The unatexit subroutine unregisters __dtor functions that were previously
4574
  // registered by the atexit subroutine. If the referenced function is found,
4575
  // unatexit returns a value of 0, meaning that the cleanup is still
4576
  // pending (and we should call the __dtor function).
4577
22
  llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub);
4578
4579
22
  llvm::Value *NeedsDestruct = CGF.Builder.CreateIsNull(V, "needs_destruct");
4580
4581
22
  llvm::BasicBlock *DestructCallBlock = CGF.createBasicBlock("destruct.call");
4582
22
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("destruct.end");
4583
4584
  // Check if unatexit returns a value of 0. If it does, jump to
4585
  // DestructCallBlock, otherwise jump to EndBlock directly.
4586
22
  CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);
4587
4588
22
  CGF.EmitBlock(DestructCallBlock);
4589
4590
  // Emit the call to dtorStub.
4591
22
  llvm::CallInst *CI = CGF.Builder.CreateCall(dtorStub);
4592
4593
  // Make sure the call and the callee agree on calling convention.
4594
22
  CI->setCallingConv(dtorStub->getCallingConv());
4595
4596
22
  CGF.EmitBlock(EndBlock);
4597
4598
22
  CGF.FinishFunction();
4599
4600
22
  assert(!D.getAttr<InitPriorityAttr>() &&
4601
22
         "Prioritized sinit and sterm functions are not yet supported.");
4602
4603
22
  if (isTemplateInstantiation(D.getTemplateSpecializationKind()) ||
4604
18
      getContext().GetGVALinkageForVariable(&D) == GVA_DiscardableODR)
4605
    // According to C++ [basic.start.init]p2, class template static data
4606
    // members (i.e., implicitly or explicitly instantiated specializations)
4607
    // have unordered initialization. As a consequence, we can put them into
4608
    // their own llvm.global_dtors entry.
4609
6
    CGM.AddCXXStermFinalizerToGlobalDtor(StermFinalizer, 65535);
4610
16
  else
4611
16
    CGM.AddCXXStermFinalizerEntry(StermFinalizer);
4612
22
}
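// Illustration (not part of ItaniumCXXABI.cpp): the finalizer that
// emitCXXStermFinalizer builds behaves roughly like the function below.
// "unatexit" is the AIX routine referenced in the comments above; its
// declaration and the __dtor stub here are assumptions made for the sketch.
extern "C" int unatexit(void (*Func)(void));

namespace sterm_sketch {
inline void dtorStub() { /* destroy the guarded variable */ }

inline void stermFinalizer() {
  // unatexit returns 0 when the handler was still registered, i.e. atexit
  // processing has not run it yet, so the cleanup is still pending and the
  // finalizer must invoke the __dtor stub itself.
  if (unatexit(&dtorStub) == 0)
    dtorStub();
}
} // namespace sterm_sketch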