Coverage Report

Created: 2022-07-16 07:03

/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp
Line | Count | Source
1
//===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
//
9
// This provides C++ code generation targeting the Itanium C++ ABI.  The class
10
// in this file generates structures that follow the Itanium C++ ABI, which is
11
// documented at:
12
//  https://itanium-cxx-abi.github.io/cxx-abi/abi.html
13
//  https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html
14
//
15
// It also supports the closely-related ARM ABI, documented at:
16
// https://developer.arm.com/documentation/ihi0041/g/
17
//
18
//===----------------------------------------------------------------------===//
19
20
#include "CGCXXABI.h"
21
#include "CGCleanup.h"
22
#include "CGRecordLayout.h"
23
#include "CGVTables.h"
24
#include "CodeGenFunction.h"
25
#include "CodeGenModule.h"
26
#include "TargetInfo.h"
27
#include "clang/AST/Attr.h"
28
#include "clang/AST/Mangle.h"
29
#include "clang/AST/StmtCXX.h"
30
#include "clang/AST/Type.h"
31
#include "clang/CodeGen/ConstantInitBuilder.h"
32
#include "llvm/IR/DataLayout.h"
33
#include "llvm/IR/GlobalValue.h"
34
#include "llvm/IR/Instructions.h"
35
#include "llvm/IR/Intrinsics.h"
36
#include "llvm/IR/Value.h"
37
#include "llvm/Support/ScopedPrinter.h"
38
39
using namespace clang;
40
using namespace CodeGen;
41
42
namespace {
43
class ItaniumCXXABI : public CodeGen::CGCXXABI {
44
  /// VTables - All the vtables which have been defined.
45
  llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
46
47
  /// All the thread wrapper functions that have been used.
48
  llvm::SmallVector<std::pair<const VarDecl *, llvm::Function *>, 8>
49
      ThreadWrappers;
50
51
protected:
52
  bool UseARMMethodPtrABI;
53
  bool UseARMGuardVarABI;
54
  bool Use32BitVTableOffsetABI;
55
56
11.8k
  ItaniumMangleContext &getMangleContext() {
57
11.8k
    return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
58
11.8k
  }
59
60
public:
61
  ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
62
                bool UseARMMethodPtrABI = false,
63
                bool UseARMGuardVarABI = false) :
64
    CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
65
    UseARMGuardVarABI(UseARMGuardVarABI),
66
35.6k
    Use32BitVTableOffsetABI(false) { }
67
68
  bool classifyReturnType(CGFunctionInfo &FI) const override;
69
70
133k
  RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
71
    // If C++ prohibits us from making a copy, pass by address.
72
133k
    if (!RD->canPassInRegisters())
73
13.3k
      return RAA_Indirect;
74
120k
    return RAA_Default;
75
133k
  }
76
77
1.02k
  bool isThisCompleteObject(GlobalDecl GD) const override {
78
    // The Itanium ABI has separate complete-object vs.  base-object
79
    // variants of both constructors and destructors.
80
1.02k
    if (isa<CXXDestructorDecl>(GD.getDecl())) {
81
377
      switch (GD.getDtorType()) {
82
159
      case Dtor_Complete:
83
274
      case Dtor_Deleting:
84
274
        return true;
85
86
103
      case Dtor_Base:
87
103
        return false;
88
89
0
      case Dtor_Comdat:
90
0
        llvm_unreachable("emitting dtor comdat as function?");
91
377
      }
92
0
      llvm_unreachable("bad dtor kind");
93
0
    }
94
647
    if (isa<CXXConstructorDecl>(GD.getDecl())) {
95
378
      switch (GD.getCtorType()) {
96
217
      case Ctor_Complete:
97
217
        return true;
98
99
161
      case Ctor_Base:
100
161
        return false;
101
102
0
      case Ctor_CopyingClosure:
103
0
      case Ctor_DefaultClosure:
104
0
        llvm_unreachable("closure ctors in Itanium ABI?");
105
106
0
      case Ctor_Comdat:
107
0
        llvm_unreachable("emitting ctor comdat as function?");
108
378
      }
109
0
      llvm_unreachable("bad dtor kind");
110
0
    }
111
112
    // No other kinds.
113
269
    return false;
114
647
  }
115
116
  bool isZeroInitializable(const MemberPointerType *MPT) override;
117
118
  llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
119
120
  CGCallee
121
    EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
122
                                    const Expr *E,
123
                                    Address This,
124
                                    llvm::Value *&ThisPtrForCall,
125
                                    llvm::Value *MemFnPtr,
126
                                    const MemberPointerType *MPT) override;
127
128
  llvm::Value *
129
    EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
130
                                 Address Base,
131
                                 llvm::Value *MemPtr,
132
                                 const MemberPointerType *MPT) override;
133
134
  llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
135
                                           const CastExpr *E,
136
                                           llvm::Value *Src) override;
137
  llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
138
                                              llvm::Constant *Src) override;
139
140
  llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
141
142
  llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
143
  llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
144
                                        CharUnits offset) override;
145
  llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
146
  llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
147
                                     CharUnits ThisAdjustment);
148
149
  llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
150
                                           llvm::Value *L, llvm::Value *R,
151
                                           const MemberPointerType *MPT,
152
                                           bool Inequality) override;
153
154
  llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
155
                                         llvm::Value *Addr,
156
                                         const MemberPointerType *MPT) override;
157
158
  void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
159
                               Address Ptr, QualType ElementType,
160
                               const CXXDestructorDecl *Dtor) override;
161
162
  void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
163
  void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
164
165
  void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
166
167
  llvm::CallInst *
168
  emitTerminateForUnexpectedException(CodeGenFunction &CGF,
169
                                      llvm::Value *Exn) override;
170
171
  void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
172
  llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
173
  CatchTypeInfo
174
  getAddrOfCXXCatchHandlerType(QualType Ty,
175
92
                               QualType CatchHandlerType) override {
176
92
    return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
177
92
  }
178
179
  bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
180
  void EmitBadTypeidCall(CodeGenFunction &CGF) override;
181
  llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
182
                          Address ThisPtr,
183
                          llvm::Type *StdTypeInfoPtrTy) override;
184
185
  bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
186
                                          QualType SrcRecordTy) override;
187
188
  llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
189
                                   QualType SrcRecordTy, QualType DestTy,
190
                                   QualType DestRecordTy,
191
                                   llvm::BasicBlock *CastEnd) override;
192
193
  llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
194
                                     QualType SrcRecordTy,
195
                                     QualType DestTy) override;
196
197
  bool EmitBadCastCall(CodeGenFunction &CGF) override;
198
199
  llvm::Value *
200
    GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
201
                              const CXXRecordDecl *ClassDecl,
202
                              const CXXRecordDecl *BaseClassDecl) override;
203
204
  void EmitCXXConstructors(const CXXConstructorDecl *D) override;
205
206
  AddedStructorArgCounts
207
  buildStructorSignature(GlobalDecl GD,
208
                         SmallVectorImpl<CanQualType> &ArgTys) override;
209
210
  bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
211
17.7k
                              CXXDtorType DT) const override {
212
    // Itanium does not emit any destructor variant as an inline thunk.
213
    // Delegating may occur as an optimization, but all variants are either
214
    // emitted with external linkage or as linkonce if they are inline and used.
215
17.7k
    return false;
216
17.7k
  }
217
218
  void EmitCXXDestructors(const CXXDestructorDecl *D) override;
219
220
  void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
221
                                 FunctionArgList &Params) override;
222
223
  void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
224
225
  AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF,
226
                                               const CXXConstructorDecl *D,
227
                                               CXXCtorType Type,
228
                                               bool ForVirtualBase,
229
                                               bool Delegating) override;
230
231
  llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF,
232
                                             const CXXDestructorDecl *DD,
233
                                             CXXDtorType Type,
234
                                             bool ForVirtualBase,
235
                                             bool Delegating) override;
236
237
  void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
238
                          CXXDtorType Type, bool ForVirtualBase,
239
                          bool Delegating, Address This,
240
                          QualType ThisTy) override;
241
242
  void emitVTableDefinitions(CodeGenVTables &CGVT,
243
                             const CXXRecordDecl *RD) override;
244
245
  bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
246
                                           CodeGenFunction::VPtr Vptr) override;
247
248
2.05k
  bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
249
2.05k
    return true;
250
2.05k
  }
251
252
  llvm::Constant *
253
  getVTableAddressPoint(BaseSubobject Base,
254
                        const CXXRecordDecl *VTableClass) override;
255
256
  llvm::Value *getVTableAddressPointInStructor(
257
      CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
258
      BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
259
260
  llvm::Value *getVTableAddressPointInStructorWithVTT(
261
      CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
262
      BaseSubobject Base, const CXXRecordDecl *NearestVBase);
263
264
  llvm::Constant *
265
  getVTableAddressPointForConstExpr(BaseSubobject Base,
266
                                    const CXXRecordDecl *VTableClass) override;
267
268
  llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
269
                                        CharUnits VPtrOffset) override;
270
271
  CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
272
                                     Address This, llvm::Type *Ty,
273
                                     SourceLocation Loc) override;
274
275
  llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
276
                                         const CXXDestructorDecl *Dtor,
277
                                         CXXDtorType DtorType, Address This,
278
                                         DeleteOrMemberCallExpr E) override;
279
280
  void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
281
282
  bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
283
  bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;
284
285
  void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
286
366
                       bool ReturnAdjustment) override {
287
    // Allow inlining of thunks by emitting them with available_externally
288
    // linkage together with vtables when needed.
289
366
    if (ForVTable && !Thunk->hasLocalLinkage())
290
29
      Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
291
366
    CGM.setGVProperties(Thunk, GD);
292
366
  }
293
294
366
  bool exportThunk() override { return true; }
295
296
  llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
297
                                     const ThisAdjustment &TA) override;
298
299
  llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
300
                                       const ReturnAdjustment &RA) override;
301
302
  size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
303
232
                              FunctionArgList &Args) const override {
304
232
    assert(!Args.empty() && "expected the arglist to not be empty!");
305
0
    return Args.size() - 1;
306
232
  }
307
308
59
  StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
309
  StringRef GetDeletedVirtualCallName() override
310
8
    { return "__cxa_deleted_virtual"; }
311
312
  CharUnits getArrayCookieSizeImpl(QualType elementType) override;
313
  Address InitializeArrayCookie(CodeGenFunction &CGF,
314
                                Address NewPtr,
315
                                llvm::Value *NumElements,
316
                                const CXXNewExpr *expr,
317
                                QualType ElementType) override;
318
  llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
319
                                   Address allocPtr,
320
                                   CharUnits cookieSize) override;
321
322
  void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
323
                       llvm::GlobalVariable *DeclPtr,
324
                       bool PerformInit) override;
325
  void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
326
                          llvm::FunctionCallee dtor,
327
                          llvm::Constant *addr) override;
328
329
  llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
330
                                                llvm::Value *Val);
331
  void EmitThreadLocalInitFuncs(
332
      CodeGenModule &CGM,
333
      ArrayRef<const VarDecl *> CXXThreadLocals,
334
      ArrayRef<llvm::Function *> CXXThreadLocalInits,
335
      ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
336
337
459
  bool usesThreadWrapperFunction(const VarDecl *VD) const override {
338
459
    return !isEmittedWithConstantInitializer(VD) ||
339
459
           mayNeedDestruction(VD);
340
459
  }
341
  LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
342
                                      QualType LValType) override;
343
344
  bool NeedsVTTParameter(GlobalDecl GD) override;
345
346
  /**************************** RTTI Uniqueness ******************************/
347
348
protected:
349
  /// Returns true if the ABI requires RTTI type_info objects to be unique
350
  /// across a program.
351
5.12k
  virtual bool shouldRTTIBeUnique() const { return true; }
352
353
public:
354
  /// What sort of unique-RTTI behavior should we use?
355
  enum RTTIUniquenessKind {
356
    /// We are guaranteeing, or need to guarantee, that the RTTI string
357
    /// is unique.
358
    RUK_Unique,
359
360
    /// We are not guaranteeing uniqueness for the RTTI string, so we
361
    /// can demote to hidden visibility but must use string comparisons.
362
    RUK_NonUniqueHidden,
363
364
    /// We are not guaranteeing uniqueness for the RTTI string, so we
365
    /// have to use string comparisons, but we also have to emit it with
366
    /// non-hidden visibility.
367
    RUK_NonUniqueVisible
368
  };
369
370
  /// Return the required visibility status for the given type and linkage in
371
  /// the current ABI.
372
  RTTIUniquenessKind
373
  classifyRTTIUniqueness(QualType CanTy,
374
                         llvm::GlobalValue::LinkageTypes Linkage) const;
375
  friend class ItaniumRTTIBuilder;
376
377
  void emitCXXStructor(GlobalDecl GD) override;
378
379
  std::pair<llvm::Value *, const CXXRecordDecl *>
380
  LoadVTablePtr(CodeGenFunction &CGF, Address This,
381
                const CXXRecordDecl *RD) override;
382
383
 private:
384
371
   bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
385
371
     const auto &VtableLayout =
386
371
         CGM.getItaniumVTableContext().getVTableLayout(RD);
387
388
1.55k
     for (const auto &VtableComponent : VtableLayout.vtable_components()) {
389
       // Skip empty slot.
390
1.55k
       if (!VtableComponent.isUsedFunctionPointerKind())
391
911
         continue;
392
393
647
       const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
394
647
       if (!Method->getCanonicalDecl()->isInlined())
395
489
         continue;
396
397
158
       StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
398
158
       auto *Entry = CGM.GetGlobalValue(Name);
399
       // This checks if virtual inline function has already been emitted.
400
       // Note that it is possible that this inline function would be emitted
401
       // after trying to emit vtable speculatively. Because of this we do
402
       // an extra pass after emitting all deferred vtables to find and emit
403
       // these vtables opportunistically.
404
158
       if (!Entry || Entry->isDeclaration())
405
147
         return true;
406
158
     }
407
224
     return false;
408
371
  }
409
410
442
  bool isVTableHidden(const CXXRecordDecl *RD) const {
411
442
    const auto &VtableLayout =
412
442
            CGM.getItaniumVTableContext().getVTableLayout(RD);
413
414
2.09k
    for (const auto &VtableComponent : VtableLayout.vtable_components()) {
415
2.09k
      if (VtableComponent.isRTTIKind()) {
416
484
        const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
417
484
        if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
418
12
          return true;
419
1.61k
      } else if (VtableComponent.isUsedFunctionPointerKind()) {
420
992
        const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
421
992
        if (Method->getVisibility() == Visibility::HiddenVisibility &&
422
992
            !Method->isDefined())
423
7
          return true;
424
992
      }
425
2.09k
    }
426
423
    return false;
427
442
  }
428
};
429
430
class ARMCXXABI : public ItaniumCXXABI {
431
public:
432
  ARMCXXABI(CodeGen::CodeGenModule &CGM) :
433
    ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
434
711
                  /*UseARMGuardVarABI=*/true) {}
435
436
17.3k
  bool HasThisReturn(GlobalDecl GD) const override {
437
17.3k
    return (isa<CXXConstructorDecl>(GD.getDecl()) || (
438
15.3k
              isa<CXXDestructorDecl>(GD.getDecl()) &&
439
15.3k
              GD.getDtorType() != Dtor_Deleting));
440
17.3k
  }
441
442
  void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
443
                           QualType ResTy) override;
444
445
  CharUnits getArrayCookieSizeImpl(QualType elementType) override;
446
  Address InitializeArrayCookie(CodeGenFunction &CGF,
447
                                Address NewPtr,
448
                                llvm::Value *NumElements,
449
                                const CXXNewExpr *expr,
450
                                QualType ElementType) override;
451
  llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
452
                                   CharUnits cookieSize) override;
453
};
454
455
class AppleARM64CXXABI : public ARMCXXABI {
456
public:
457
106
  AppleARM64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
458
106
    Use32BitVTableOffsetABI = true;
459
106
  }
460
461
  // ARM64 libraries are prepared for non-unique RTTI.
462
40
  bool shouldRTTIBeUnique() const override { return false; }
463
};
464
465
class FuchsiaCXXABI final : public ItaniumCXXABI {
466
public:
467
  explicit FuchsiaCXXABI(CodeGen::CodeGenModule &CGM)
468
30
      : ItaniumCXXABI(CGM) {}
469
470
private:
471
581
  bool HasThisReturn(GlobalDecl GD) const override {
472
581
    return isa<CXXConstructorDecl>(GD.getDecl()) ||
473
581
           
(397
isa<CXXDestructorDecl>(GD.getDecl())397
&&
474
397
            
GD.getDtorType() != Dtor_Deleting216
);
475
581
  }
476
};
477
478
class WebAssemblyCXXABI final : public ItaniumCXXABI {
479
public:
480
  explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
481
      : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
482
41
                      /*UseARMGuardVarABI=*/true) {}
483
  void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
484
  llvm::CallInst *
485
  emitTerminateForUnexpectedException(CodeGenFunction &CGF,
486
                                      llvm::Value *Exn) override;
487
488
private:
489
789
  bool HasThisReturn(GlobalDecl GD) const override {
490
789
    return isa<CXXConstructorDecl>(GD.getDecl()) ||
491
789
           (isa<CXXDestructorDecl>(GD.getDecl()) &&
492
685
            GD.getDtorType() != Dtor_Deleting);
493
789
  }
494
1
  bool canCallMismatchedFunctionType() const override { return false; }
495
};
496
497
class XLCXXABI final : public ItaniumCXXABI {
498
public:
499
  explicit XLCXXABI(CodeGen::CodeGenModule &CGM)
500
117
      : ItaniumCXXABI(CGM) {}
501
502
  void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
503
                          llvm::FunctionCallee dtor,
504
                          llvm::Constant *addr) override;
505
506
162
  bool useSinitAndSterm() const override { return true; }
507
508
private:
509
  void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
510
                             llvm::Constant *addr);
511
};
512
}
513
514
35.6k
CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
515
35.6k
  switch (CGM.getContext().getCXXABIKind()) {
516
  // For IR-generation purposes, there's no significant difference
517
  // between the ARM and iOS ABIs.
518
486
  case TargetCXXABI::GenericARM:
519
582
  case TargetCXXABI::iOS:
520
605
  case TargetCXXABI::WatchOS:
521
605
    return new ARMCXXABI(CGM);
522
523
106
  case TargetCXXABI::AppleARM64:
524
106
    return new AppleARM64CXXABI(CGM);
525
526
30
  case TargetCXXABI::Fuchsia:
527
30
    return new FuchsiaCXXABI(CGM);
528
529
  // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
530
  // include the other 32-bit ARM oddities: constructor/destructor return values
531
  // and array cookies.
532
2.31k
  case TargetCXXABI::GenericAArch64:
533
2.31k
    return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
534
2.31k
                             /*UseARMGuardVarABI=*/true);
535
536
153
  case TargetCXXABI::GenericMIPS:
537
153
    return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
538
539
41
  case TargetCXXABI::WebAssembly:
540
41
    return new WebAssemblyCXXABI(CGM);
541
542
117
  case TargetCXXABI::XL:
543
117
    return new XLCXXABI(CGM);
544
545
32.2k
  case TargetCXXABI::GenericItanium:
546
32.2k
    if (CGM.getContext().getTargetInfo().getTriple().getArch()
547
32.2k
        == llvm::Triple::le32) {
548
      // For PNaCl, use ARM-style method pointers so that PNaCl code
549
      // does not assume anything about the alignment of function
550
      // pointers.
551
0
      return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
552
0
    }
553
32.2k
    return new ItaniumCXXABI(CGM);
554
555
0
  case TargetCXXABI::Microsoft:
556
0
    llvm_unreachable("Microsoft ABI is not Itanium-based");
557
35.6k
  }
558
0
  llvm_unreachable("bad ABI kind");
559
0
}
560
561
llvm::Type *
562
1.00k
ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
563
1.00k
  if (MPT->isMemberDataPointer())
564
217
    return CGM.PtrDiffTy;
565
792
  return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
566
1.00k
}
567
568
/// In the Itanium and ARM ABIs, method pointers have the form:
569
///   struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
570
///
571
/// In the Itanium ABI:
572
///  - method pointers are virtual if (memptr.ptr & 1) is nonzero
573
///  - the this-adjustment is (memptr.adj)
574
///  - the virtual offset is (memptr.ptr - 1)
575
///
576
/// In the ARM ABI:
577
///  - method pointers are virtual if (memptr.adj & 1) is nonzero
578
///  - the this-adjustment is (memptr.adj >> 1)
579
///  - the virtual offset is (memptr.ptr)
580
/// ARM uses 'adj' for the virtual flag because Thumb functions
581
/// may be only single-byte aligned.
582
///
583
/// If the member is virtual, the adjusted 'this' pointer points
584
/// to a vtable pointer from which the virtual offset is applied.
585
///
586
/// If the member is non-virtual, memptr.ptr is the address of
587
/// the function to call.
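For illustration, here is a minimal sketch of the encodings described above (not part of this file; the class X and the 8-byte vtable slot size are assumptions for a typical 64-bit target):

struct X {
  void f();          // non-virtual member function
  virtual void g();  // occupies vtable slot 0
};

// Itanium:  &X::f  ->  { ptr = (ptrdiff_t)&X::f, adj = 0 }
//           &X::g  ->  { ptr = 0 * 8 + 1,        adj = 0 }            // vtable offset + 1
// ARM:      &X::f  ->  { ptr = (ptrdiff_t)&X::f, adj = (0 << 1) | 0 }
//           &X::g  ->  { ptr = 0 * 8,            adj = (0 << 1) | 1 } // virtual bit lives in adj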
588
CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
589
    CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
590
    llvm::Value *&ThisPtrForCall,
591
100
    llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
592
100
  CGBuilderTy &Builder = CGF.Builder;
593
594
100
  const FunctionProtoType *FPT =
595
100
    MPT->getPointeeType()->getAs<FunctionProtoType>();
596
100
  auto *RD =
597
100
      cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());
598
599
100
  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
600
100
      CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
601
602
100
  llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
603
604
100
  llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
605
100
  llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
606
100
  llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
607
608
  // Extract memptr.adj, which is in the second field.
609
100
  llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
610
611
  // Compute the true adjustment.
612
100
  llvm::Value *Adj = RawAdj;
613
100
  if (UseARMMethodPtrABI)
614
23
    Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
615
616
  // Apply the adjustment and cast back to the original struct type
617
  // for consistency.
618
100
  llvm::Value *This = ThisAddr.getPointer();
619
100
  llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
620
100
  Ptr = Builder.CreateInBoundsGEP(Builder.getInt8Ty(), Ptr, Adj);
621
100
  This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
622
100
  ThisPtrForCall = This;
623
624
  // Load the function pointer.
625
100
  llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
626
627
  // If the LSB in the function pointer is 1, the function pointer points to
628
  // a virtual function.
629
100
  llvm::Value *IsVirtual;
630
100
  if (UseARMMethodPtrABI)
631
23
    IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
632
77
  else
633
77
    IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
634
100
  IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
635
100
  Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
636
637
  // In the virtual path, the adjustment left 'This' pointing to the
638
  // vtable of the correct base subobject.  The "function pointer" is an
639
  // offset within the vtable (+1 for the virtual flag on non-ARM).
640
100
  CGF.EmitBlock(FnVirtual);
641
642
  // Cast the adjusted this to a pointer to vtable pointer and load.
643
100
  llvm::Type *VTableTy = Builder.getInt8PtrTy();
644
100
  CharUnits VTablePtrAlign =
645
100
    CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
646
100
                                      CGF.getPointerAlign());
647
100
  llvm::Value *VTable = CGF.GetVTablePtr(
648
100
      Address(This, ThisAddr.getElementType(), VTablePtrAlign), VTableTy, RD);
649
650
  // Apply the offset.
651
  // On ARM64, to reserve extra space in virtual member function pointers,
652
  // we only pay attention to the low 32 bits of the offset.
653
100
  llvm::Value *VTableOffset = FnAsInt;
654
100
  if (!UseARMMethodPtrABI)
655
77
    VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
656
100
  if (Use32BitVTableOffsetABI) {
657
4
    VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
658
4
    VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
659
4
  }
660
661
  // Check the address of the function pointer if CFI on member function
662
  // pointers is enabled.
663
100
  llvm::Constant *CheckSourceLocation;
664
100
  llvm::Constant *CheckTypeDesc;
665
100
  bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) &&
666
100
                            CGM.HasHiddenLTOVisibility(RD);
667
100
  bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination &&
668
100
                           CGM.HasHiddenLTOVisibility(RD);
669
100
  bool ShouldEmitWPDInfo =
670
100
      CGM.getCodeGenOpts().WholeProgramVTables &&
671
      // Don't insert type tests if we are forcing public visibility.
672
100
      !CGM.AlwaysHasLTOVisibilityPublic(RD);
673
100
  llvm::Value *VirtualFn = nullptr;
674
675
100
  {
676
100
    CodeGenFunction::SanitizerScope SanScope(&CGF);
677
100
    llvm::Value *TypeId = nullptr;
678
100
    llvm::Value *CheckResult = nullptr;
679
680
100
    if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) {
681
      // If doing CFI, VFE or WPD, we will need the metadata node to check
682
      // against.
683
5
      llvm::Metadata *MD =
684
5
          CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
685
5
      TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
686
5
    }
687
688
100
    if (ShouldEmitVFEInfo) {
689
1
      llvm::Value *VFPAddr =
690
1
          Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
691
692
      // If doing VFE, load from the vtable with a type.checked.load intrinsic
693
      // call. Note that we use the GEP to calculate the address to load from
694
      // and pass 0 as the offset to the intrinsic. This is because every
695
      // vtable slot of the correct type is marked with matching metadata, and
696
      // we know that the load must be from one of these slots.
697
1
      llvm::Value *CheckedLoad = Builder.CreateCall(
698
1
          CGM.getIntrinsic(llvm::Intrinsic::type_checked_load),
699
1
          {VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId});
700
1
      CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);
701
1
      VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0);
702
1
      VirtualFn = Builder.CreateBitCast(VirtualFn, FTy->getPointerTo(),
703
1
                                        "memptr.virtualfn");
704
99
    } else {
705
      // When not doing VFE, emit a normal load, as it allows more
706
      // optimisations than type.checked.load.
707
99
      if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
708
4
        llvm::Value *VFPAddr =
709
4
            Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
710
4
        CheckResult = Builder.CreateCall(
711
4
            CGM.getIntrinsic(llvm::Intrinsic::type_test),
712
4
            {Builder.CreateBitCast(VFPAddr, CGF.Int8PtrTy), TypeId});
713
4
      }
714
715
99
      if (CGM.getItaniumVTableContext().isRelativeLayout()) {
716
1
        VirtualFn = CGF.Builder.CreateCall(
717
1
            CGM.getIntrinsic(llvm::Intrinsic::load_relative,
718
1
                             {VTableOffset->getType()}),
719
1
            {VTable, VTableOffset});
720
1
        VirtualFn = CGF.Builder.CreateBitCast(VirtualFn, FTy->getPointerTo());
721
98
      } else {
722
98
        llvm::Value *VFPAddr =
723
98
            CGF.Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
724
98
        VFPAddr = CGF.Builder.CreateBitCast(
725
98
            VFPAddr, FTy->getPointerTo()->getPointerTo());
726
98
        VirtualFn = CGF.Builder.CreateAlignedLoad(
727
98
            FTy->getPointerTo(), VFPAddr, CGF.getPointerAlign(),
728
98
            "memptr.virtualfn");
729
98
      }
730
99
    }
731
100
    assert(VirtualFn && "Virtual function pointer not created!");
732
0
    assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || !ShouldEmitWPDInfo ||
733
100
            CheckResult) &&
734
100
           "Check result required but not created!");
735
736
100
    if (ShouldEmitCFICheck) {
737
      // If doing CFI, emit the check.
738
2
      CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc());
739
2
      CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
740
2
      llvm::Constant *StaticData[] = {
741
2
          llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
742
2
          CheckSourceLocation,
743
2
          CheckTypeDesc,
744
2
      };
745
746
2
      if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
747
2
        CGF.EmitTrapCheck(CheckResult, SanitizerHandler::CFICheckFail);
748
2
      } else {
749
0
        llvm::Value *AllVtables = llvm::MetadataAsValue::get(
750
0
            CGM.getLLVMContext(),
751
0
            llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
752
0
        llvm::Value *ValidVtable = Builder.CreateCall(
753
0
            CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
754
0
        CGF.EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIMFCall),
755
0
                      SanitizerHandler::CFICheckFail, StaticData,
756
0
                      {VTable, ValidVtable});
757
0
      }
758
759
2
      FnVirtual = Builder.GetInsertBlock();
760
2
    }
761
100
  } // End of sanitizer scope
762
763
0
  CGF.EmitBranch(FnEnd);
764
765
  // In the non-virtual path, the function pointer is actually a
766
  // function pointer.
767
100
  CGF.EmitBlock(FnNonVirtual);
768
100
  llvm::Value *NonVirtualFn =
769
100
    Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
770
771
  // Check the function pointer if CFI on member function pointers is enabled.
772
100
  if (ShouldEmitCFICheck) {
773
2
    CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
774
2
    if (RD->hasDefinition()) {
775
1
      CodeGenFunction::SanitizerScope SanScope(&CGF);
776
777
1
      llvm::Constant *StaticData[] = {
778
1
          llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall),
779
1
          CheckSourceLocation,
780
1
          CheckTypeDesc,
781
1
      };
782
783
1
      llvm::Value *Bit = Builder.getFalse();
784
1
      llvm::Value *CastedNonVirtualFn =
785
1
          Builder.CreateBitCast(NonVirtualFn, CGF.Int8PtrTy);
786
2
      for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
787
2
        llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
788
2
            getContext().getMemberPointerType(
789
2
                MPT->getPointeeType(),
790
2
                getContext().getRecordType(Base).getTypePtr()));
791
2
        llvm::Value *TypeId =
792
2
            llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
793
794
2
        llvm::Value *TypeTest =
795
2
            Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
796
2
                               {CastedNonVirtualFn, TypeId});
797
2
        Bit = Builder.CreateOr(Bit, TypeTest);
798
2
      }
799
800
1
      CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall),
801
1
                    SanitizerHandler::CFICheckFail, StaticData,
802
1
                    {CastedNonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});
803
804
1
      FnNonVirtual = Builder.GetInsertBlock();
805
1
    }
806
2
  }
807
808
  // We're done.
809
100
  CGF.EmitBlock(FnEnd);
810
100
  llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(), 2);
811
100
  CalleePtr->addIncoming(VirtualFn, FnVirtual);
812
100
  CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
813
814
100
  CGCallee Callee(FPT, CalleePtr);
815
100
  return Callee;
816
100
}
817
818
/// Compute an l-value by applying the given pointer-to-member to a
819
/// base object.
820
llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
821
    CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
822
57
    const MemberPointerType *MPT) {
823
57
  assert(MemPtr->getType() == CGM.PtrDiffTy);
824
825
0
  CGBuilderTy &Builder = CGF.Builder;
826
827
  // Cast to char*.
828
57
  Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);
829
830
  // Apply the offset, which we assume is non-null.
831
57
  llvm::Value *Addr = Builder.CreateInBoundsGEP(
832
57
      Base.getElementType(), Base.getPointer(), MemPtr, "memptr.offset");
833
834
  // Cast the address to the appropriate pointer type, adopting the
835
  // address space of the base pointer.
836
57
  llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
837
57
                            ->getPointerTo(Base.getAddressSpace());
838
57
  return Builder.CreateBitCast(Addr, PType);
839
57
}
840
841
/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
842
/// conversion.
843
///
844
/// Bitcast conversions are always a no-op under Itanium.
845
///
846
/// Obligatory offset/adjustment diagram:
847
///         <-- offset -->          <-- adjustment -->
848
///   |--------------------------|----------------------|--------------------|
849
///   ^Derived address point     ^Base address point    ^Member address point
850
///
851
/// So when converting a base member pointer to a derived member pointer,
852
/// we add the offset to the adjustment because the address point has
853
/// decreased;  and conversely, when converting a derived MP to a base MP
854
/// we subtract the offset from the adjustment because the address point
855
/// has increased.
856
///
857
/// The standard forbids (at compile time) conversion to and from
858
/// virtual bases, which is why we don't have to consider them here.
859
///
860
/// The standard forbids (at run time) casting a derived MP to a base
861
/// MP when the derived MP does not point to a member of the base.
862
/// This is why -1 is a reasonable choice for null data member
863
/// pointers.
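As a concrete sketch of the rule above (hypothetical hierarchy, not part of this file): a base-to-derived conversion adds the base subobject's offset within the derived class to the adjustment, and a derived-to-base conversion subtracts it.

struct A { virtual void fa(); };
struct B { virtual void fb(); };
struct D : A, B {};              // the B subobject sits at some nonzero offset dB within D

// void (B::*pb)() = &B::fb;     // this-adjustment 0
// void (D::*pd)() = pb;         // base-to-derived: adjustment becomes 0 + dB
//                               // (ARM stores 2*adjustment plus the virtual bit,
//                               //  so its adj field grows by 2*dB)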
864
llvm::Value *
865
ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
866
                                           const CastExpr *E,
867
52
                                           llvm::Value *src) {
868
52
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
869
52
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
870
52
         E->getCastKind() == CK_ReinterpretMemberPointer);
871
872
  // Under Itanium, reinterprets don't require any additional processing.
873
52
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
874
875
  // Use constant emission if we can.
876
49
  if (isa<llvm::Constant>(src))
877
30
    return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));
878
879
19
  llvm::Constant *adj = getMemberPointerAdjustment(E);
880
19
  if (!adj) return src;
881
882
16
  CGBuilderTy &Builder = CGF.Builder;
883
16
  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
884
885
16
  const MemberPointerType *destTy =
886
16
    E->getType()->castAs<MemberPointerType>();
887
888
  // For member data pointers, this is just a matter of adding the
889
  // offset if the source is non-null.
890
16
  if (destTy->isMemberDataPointer()) {
891
2
    llvm::Value *dst;
892
2
    if (isDerivedToBase)
893
1
      dst = Builder.CreateNSWSub(src, adj, "adj");
894
1
    else
895
1
      dst = Builder.CreateNSWAdd(src, adj, "adj");
896
897
    // Null check.
898
2
    llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
899
2
    llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
900
2
    return Builder.CreateSelect(isNull, src, dst);
901
2
  }
902
903
  // The this-adjustment is left-shifted by 1 on ARM.
904
14
  if (UseARMMethodPtrABI) {
905
6
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
906
6
    offset <<= 1;
907
6
    adj = llvm::ConstantInt::get(adj->getType(), offset);
908
6
  }
909
910
14
  llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
911
14
  llvm::Value *dstAdj;
912
14
  if (isDerivedToBase)
913
7
    dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
914
7
  else
915
7
    dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");
916
917
14
  return Builder.CreateInsertValue(src, dstAdj, 1);
918
16
}
919
920
llvm::Constant *
921
ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
922
66
                                           llvm::Constant *src) {
923
66
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
924
66
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
925
66
         E->getCastKind() == CK_ReinterpretMemberPointer);
926
927
  // Under Itanium, reinterprets don't require any additional processing.
928
66
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
929
930
  // If the adjustment is trivial, we don't need to do anything.
931
37
  llvm::Constant *adj = getMemberPointerAdjustment(E);
932
37
  if (!adj) return src;
933
934
7
  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
935
936
7
  const MemberPointerType *destTy =
937
7
    E->getType()->castAs<MemberPointerType>();
938
939
  // For member data pointers, this is just a matter of adding the
940
  // offset if the source is non-null.
941
7
  if (destTy->isMemberDataPointer()) {
942
    // null maps to null.
943
0
    if (src->isAllOnesValue()) return src;
944
945
0
    if (isDerivedToBase)
946
0
      return llvm::ConstantExpr::getNSWSub(src, adj);
947
0
    else
948
0
      return llvm::ConstantExpr::getNSWAdd(src, adj);
949
0
  }
950
951
  // The this-adjustment is left-shifted by 1 on ARM.
952
7
  if (UseARMMethodPtrABI) {
953
3
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
954
3
    offset <<= 1;
955
3
    adj = llvm::ConstantInt::get(adj->getType(), offset);
956
3
  }
957
958
7
  llvm::Constant *srcAdj = src->getAggregateElement(1);
959
7
  llvm::Constant *dstAdj;
960
7
  if (isDerivedToBase)
961
0
    dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
962
7
  else
963
7
    dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
964
965
7
  llvm::Constant *res = ConstantFoldInsertValueInstruction(src, dstAdj, 1);
966
7
  assert(res != nullptr && "Folding must succeed");
967
0
  return res;
968
7
}
969
970
llvm::Constant *
971
91
ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
972
  // Itanium C++ ABI 2.3:
973
  //   A NULL pointer is represented as -1.
974
91
  if (MPT->isMemberDataPointer())
975
62
    return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
976
977
29
  llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
978
29
  llvm::Constant *Values[2] = { Zero, Zero };
979
29
  return llvm::ConstantStruct::getAnon(Values);
980
91
}
981
982
llvm::Constant *
983
ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
984
61
                                     CharUnits offset) {
985
  // Itanium C++ ABI 2.3:
986
  //   A pointer to data member is an offset from the base address of
987
  //   the class object containing it, represented as a ptrdiff_t
988
61
  return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
989
61
}
990
991
llvm::Constant *
992
242
ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
993
242
  return BuildMemberPointer(MD, CharUnits::Zero());
994
242
}
995
996
llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
997
418
                                                  CharUnits ThisAdjustment) {
998
418
  assert(MD->isInstance() && "Member function must not be static!");
999
1000
0
  CodeGenTypes &Types = CGM.getTypes();
1001
1002
  // Get the function pointer (or index if this is a virtual function).
1003
418
  llvm::Constant *MemPtr[2];
1004
418
  if (MD->isVirtual()) {
1005
114
    uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
1006
114
    uint64_t VTableOffset;
1007
114
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1008
      // Multiply by 4-byte relative offsets.
1009
0
      VTableOffset = Index * 4;
1010
114
    } else {
1011
114
      const ASTContext &Context = getContext();
1012
114
      CharUnits PointerWidth = Context.toCharUnitsFromBits(
1013
114
          Context.getTargetInfo().getPointerWidth(0));
1014
114
      VTableOffset = Index * PointerWidth.getQuantity();
1015
114
    }
1016
1017
114
    if (UseARMMethodPtrABI) {
1018
      // ARM C++ ABI 3.2.1:
1019
      //   This ABI specifies that adj contains twice the this
1020
      //   adjustment, plus 1 if the member function is virtual. The
1021
      //   least significant bit of adj then makes exactly the same
1022
      //   discrimination as the least significant bit of ptr does for
1023
      //   Itanium.
1024
38
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
1025
38
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1026
38
                                         2 * ThisAdjustment.getQuantity() + 1);
1027
76
    } else {
1028
      // Itanium C++ ABI 2.3:
1029
      //   For a virtual function, [the pointer field] is 1 plus the
1030
      //   virtual table offset (in bytes) of the function,
1031
      //   represented as a ptrdiff_t.
1032
76
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
1033
76
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1034
76
                                         ThisAdjustment.getQuantity());
1035
76
    }
1036
304
  } else {
1037
304
    const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
1038
304
    llvm::Type *Ty;
1039
    // Check whether the function has a computable LLVM signature.
1040
304
    if (Types.isFuncTypeConvertible(FPT)) {
1041
      // The function has a computable LLVM signature; use the correct type.
1042
302
      Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
1043
302
    } else {
1044
      // Use an arbitrary non-function type to tell GetAddrOfFunction that the
1045
      // function type is incomplete.
1046
2
      Ty = CGM.PtrDiffTy;
1047
2
    }
1048
304
    llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);
1049
1050
304
    MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
1051
304
    MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1052
304
                                       (UseARMMethodPtrABI ? 2 : 1) *
1053
304
                                       ThisAdjustment.getQuantity());
1054
304
  }
1055
1056
418
  return llvm::ConstantStruct::getAnon(MemPtr);
1057
418
}
1058
1059
llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
1060
206
                                                 QualType MPType) {
1061
206
  const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
1062
206
  const ValueDecl *MPD = MP.getMemberPointerDecl();
1063
206
  if (!MPD)
1064
4
    return EmitNullMemberPointer(MPT);
1065
1066
202
  CharUnits ThisAdjustment = getContext().getMemberPointerPathAdjustment(MP);
1067
1068
202
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
1069
176
    return BuildMemberPointer(MD, ThisAdjustment);
1070
1071
26
  CharUnits FieldOffset =
1072
26
    getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
1073
26
  return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
1074
202
}
1075
1076
/// The comparison algorithm is pretty easy: the member pointers are
1077
/// the same if they're either bitwise identical *or* both null.
1078
///
1079
/// ARM is different here only because null-ness is more complicated.
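For context, a sketch of the kind of source-level comparison that is lowered through this routine (hypothetical types, not part of this file):

struct S { void f(); void g(); };

bool same(void (S::*l)(), void (S::*r)()) {
  return l == r;   // member function pointer comparison, emitted below
}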
1080
llvm::Value *
1081
ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
1082
                                           llvm::Value *L,
1083
                                           llvm::Value *R,
1084
                                           const MemberPointerType *MPT,
1085
11
                                           bool Inequality) {
1086
11
  CGBuilderTy &Builder = CGF.Builder;
1087
1088
11
  llvm::ICmpInst::Predicate Eq;
1089
11
  llvm::Instruction::BinaryOps And, Or;
1090
11
  if (Inequality) {
1091
2
    Eq = llvm::ICmpInst::ICMP_NE;
1092
2
    And = llvm::Instruction::Or;
1093
2
    Or = llvm::Instruction::And;
1094
9
  } else {
1095
9
    Eq = llvm::ICmpInst::ICMP_EQ;
1096
9
    And = llvm::Instruction::And;
1097
9
    Or = llvm::Instruction::Or;
1098
9
  }
1099
1100
  // Member data pointers are easy because there's a unique null
1101
  // value, so it just comes down to bitwise equality.
1102
11
  if (MPT->isMemberDataPointer())
1103
4
    return Builder.CreateICmp(Eq, L, R);
1104
1105
  // For member function pointers, the tautologies are more complex.
1106
  // The Itanium tautology is:
1107
  //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
1108
  // The ARM tautology is:
1109
  //   (L == R) <==> (L.ptr == R.ptr &&
1110
  //                  (L.adj == R.adj ||
1111
  //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
1112
  // The inequality tautologies have exactly the same structure, except
1113
  // applying De Morgan's laws.
1114
1115
7
  llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
1116
7
  llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");
1117
1118
  // This condition tests whether L.ptr == R.ptr.  This must always be
1119
  // true for equality to hold.
1120
7
  llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");
1121
1122
  // This condition, together with the assumption that L.ptr == R.ptr,
1123
  // tests whether the pointers are both null.  ARM imposes an extra
1124
  // condition.
1125
7
  llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
1126
7
  llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");
1127
1128
  // This condition tests whether L.adj == R.adj.  If this isn't
1129
  // true, the pointers are unequal unless they're both null.
1130
7
  llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
1131
7
  llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
1132
7
  llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");
1133
1134
  // Null member function pointers on ARM clear the low bit of Adj,
1135
  // so the zero condition has to check that neither low bit is set.
1136
7
  if (UseARMMethodPtrABI) {
1137
3
    llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);
1138
1139
    // Compute (l.adj | r.adj) & 1 and test it against zero.
1140
3
    llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
1141
3
    llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
1142
3
    llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
1143
3
                                                      "cmp.or.adj");
1144
3
    EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
1145
3
  }
1146
1147
  // Tie together all our conditions.
1148
7
  llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
1149
7
  Result = Builder.CreateBinOp(And, PtrEq, Result,
1150
7
                               Inequality ? "memptr.ne" : "memptr.eq");
1151
7
  return Result;
1152
11
}
1153
1154
llvm::Value *
1155
ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
1156
                                          llvm::Value *MemPtr,
1157
49
                                          const MemberPointerType *MPT) {
1158
49
  CGBuilderTy &Builder = CGF.Builder;
1159
1160
  /// For member data pointers, this is just a check against -1.
1161
49
  if (MPT->isMemberDataPointer()) {
1162
6
    assert(MemPtr->getType() == CGM.PtrDiffTy);
1163
0
    llvm::Value *NegativeOne =
1164
6
      llvm::Constant::getAllOnesValue(MemPtr->getType());
1165
6
    return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
1166
6
  }
1167
1168
  // In Itanium, a member function pointer is not null if 'ptr' is not null.
1169
43
  llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
1170
1171
43
  llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
1172
43
  llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
1173
1174
  // On ARM, a member function pointer is also non-null if the low bit of 'adj'
1175
  // (the virtual bit) is set.
1176
43
  if (UseARMMethodPtrABI) {
1177
15
    llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
1178
15
    llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
1179
15
    llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
1180
15
    llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
1181
15
                                                  "memptr.isvirtual");
1182
15
    Result = Builder.CreateOr(Result, IsVirtual);
1183
15
  }
1184
1185
43
  return Result;
1186
49
}
1187
1188
227k
bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
1189
227k
  const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
1190
227k
  if (!RD)
1191
223k
    return false;
1192
1193
  // If C++ prohibits us from making a copy, return by address.
1194
4.89k
  if (!RD->canPassInRegisters()) {
1195
932
    auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
1196
932
    FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
1197
932
    return true;
1198
932
  }
1199
3.96k
  return false;
1200
4.89k
}
1201
1202
/// The Itanium ABI requires non-zero initialization only for data
1203
/// member pointers, for which '0' is a valid offset.
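Put differently (a sketch with a hypothetical class C, not part of this file):

struct C { int m; void f(); };

// int C::*pd = nullptr;       // represented as -1, so zero-filled storage would NOT be null
// void (C::*pf)() = nullptr;  // represented as { 0, 0 }, so zero-initialization suffices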
1204
174
bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
1205
174
  return MPT->isMemberFunctionPointer();
1206
174
}
1207
1208
/// The Itanium ABI always places an offset to the complete object
1209
/// at entry -2 in the vtable.
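For reference, a conceptual sketch of the vtable region around the address point in the non-relative Itanium layout (not taken from this file):

//   vtable[-2]  offset-to-top (ptrdiff_t)  -- added below to recover the complete object
//   vtable[-1]  RTTI pointer
//   vtable[ 0]  first virtual function slot
//   vtable[ 1]  second virtual function slot, ...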
1210
void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
1211
                                            const CXXDeleteExpr *DE,
1212
                                            Address Ptr,
1213
                                            QualType ElementType,
1214
23
                                            const CXXDestructorDecl *Dtor) {
1215
23
  bool UseGlobalDelete = DE->isGlobalDelete();
1216
23
  if (UseGlobalDelete) {
1217
    // Derive the complete-object pointer, which is what we need
1218
    // to pass to the deallocation function.
1219
1220
    // Grab the vtable pointer as an intptr_t*.
1221
6
    auto *ClassDecl =
1222
6
        cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
1223
6
    llvm::Value *VTable =
1224
6
        CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);
1225
1226
    // Track back to entry -2 and pull out the offset there.
1227
6
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
1228
6
        CGF.IntPtrTy, VTable, -2, "complete-offset.ptr");
1229
6
    llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(CGF.IntPtrTy, OffsetPtr,
                                                         CGF.getPointerAlign());
1230
1231
    // Apply the offset.
1232
6
    llvm::Value *CompletePtr =
1233
6
      CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
1234
6
    CompletePtr =
1235
6
        CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, CompletePtr, Offset);
1236
1237
    // If we're supposed to call the global delete, make sure we do so
1238
    // even if the destructor throws.
1239
6
    CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
1240
6
                                    ElementType);
1241
6
  }
1242
1243
  // FIXME: Provide a source location here even though there's no
1244
  // CXXMemberCallExpr for dtor call.
1245
23
  CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
1246
23
  EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE);
1247
1248
23
  if (UseGlobalDelete)
1249
6
    CGF.PopCleanupBlock();
1250
23
}
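// --- Illustrative sketch of the '::delete' case handled above; 'Base',
// 'Derived' and 'destroy' are made-up names.  Because the global operator
// delete must receive the complete-object pointer, the offset stored at
// vtable slot -2 is loaded and applied before the deallocation cleanup is
// pushed.
struct Base    { virtual ~Base() {} };
struct Derived : Base { long pad = 0; };
void destroy(Base *b) {
  ::delete b;   // global delete => derive the complete-object pointer first
}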
1251
1252
66
void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
1253
  // void __cxa_rethrow();
1254
1255
66
  llvm::FunctionType *FTy =
1256
66
    llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
1257
1258
66
  llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
1259
1260
66
  if (isNoReturn)
1261
53
    CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
1262
13
  else
1263
13
    CGF.EmitRuntimeCallOrInvoke(Fn);
1264
66
}
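// --- Illustrative sketch of the construct lowered above; 'work' and
// 'forward' are made-up names.
void work();
void forward() {
  try {
    work();
  } catch (...) {
    throw;   // rethrows the in-flight exception via __cxa_rethrow
  }
}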
1265
1266
661
static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
1267
  // void *__cxa_allocate_exception(size_t thrown_size);
1268
1269
661
  llvm::FunctionType *FTy =
1270
661
    llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false);
1271
1272
661
  return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
1273
661
}
1274
1275
661
static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
1276
  // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
1277
  //                  void (*dest) (void *));
1278
1279
661
  llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
1280
661
  llvm::FunctionType *FTy =
1281
661
    llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
1282
1283
661
  return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
1284
661
}
1285
1286
661
void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
1287
661
  QualType ThrowType = E->getSubExpr()->getType();
1288
  // Now allocate the exception object.
1289
661
  llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
1290
661
  uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
1291
1292
661
  llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
1293
661
  llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
1294
661
      AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");
1295
1296
661
  CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
1297
661
  CGF.EmitAnyExprToExn(
1298
661
      E->getSubExpr(), Address(ExceptionPtr, CGM.Int8Ty, ExnAlign));
1299
1300
  // Now throw the exception.
1301
661
  llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
1302
661
                                                         /*ForEH=*/true);
1303
1304
  // The address of the destructor.  If the exception type has a
1305
  // trivial destructor (or isn't a record), we just pass null.
1306
661
  llvm::Constant *Dtor = nullptr;
1307
661
  if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
1308
551
    CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
1309
551
    if (!Record->hasTrivialDestructor()) {
1310
534
      CXXDestructorDecl *DtorD = Record->getDestructor();
1311
534
      Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
1312
534
      Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
1313
534
    }
1314
551
  }
1315
661
  if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);
1316
1317
661
  llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
1318
661
  CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
1319
661
}
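// --- Illustrative sketch of source that exercises emitThrow; 'Error' and
// 'fail' are made-up names.  Because Error has a non-trivial destructor, the
// 'Dtor' argument passed to __cxa_throw above is the address of Error's
// complete-object destructor rather than null.
#include <string>
struct Error { std::string message; };   // non-trivial destructor
void fail() {
  // Roughly: __cxa_allocate_exception(sizeof(Error)), construct in place,
  // then __cxa_throw(exception, &typeinfo-for-Error, &Error-destructor).
  throw Error{"boom"};
}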
1320
1321
56
static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
1322
  // void *__dynamic_cast(const void *sub,
1323
  //                      const abi::__class_type_info *src,
1324
  //                      const abi::__class_type_info *dst,
1325
  //                      std::ptrdiff_t src2dst_offset);
1326
1327
56
  llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1328
56
  llvm::Type *PtrDiffTy =
1329
56
    CGF.ConvertType(CGF.getContext().getPointerDiffType());
1330
1331
56
  llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
1332
1333
56
  llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
1334
1335
  // Mark the function as nounwind readonly.
1336
56
  llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
1337
56
                                            llvm::Attribute::ReadOnly };
1338
56
  llvm::AttributeList Attrs = llvm::AttributeList::get(
1339
56
      CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);
1340
1341
56
  return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
1342
56
}
1343
1344
9
static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
1345
  // void __cxa_bad_cast();
1346
9
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1347
9
  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1348
9
}
1349
1350
/// Compute the src2dst_offset hint as described in the
1351
/// Itanium C++ ABI [2.9.7]
1352
static CharUnits computeOffsetHint(ASTContext &Context,
1353
                                   const CXXRecordDecl *Src,
1354
56
                                   const CXXRecordDecl *Dst) {
1355
56
  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1356
56
                     /*DetectVirtual=*/false);
1357
1358
  // If Dst is not derived from Src we can skip the whole computation below and
1359
  // return that Src is not a public base of Dst.  Record all inheritance paths.
1360
56
  if (!Dst->isDerivedFrom(Src, Paths))
1361
5
    return CharUnits::fromQuantity(-2ULL);
1362
1363
51
  unsigned NumPublicPaths = 0;
1364
51
  CharUnits Offset;
1365
1366
  // Now walk all possible inheritance paths.
1367
57
  for (const CXXBasePath &Path : Paths) {
1368
57
    if (Path.Access != AS_public)  // Ignore non-public inheritance.
1369
9
      continue;
1370
1371
48
    ++NumPublicPaths;
1372
1373
75
    for (const CXXBasePathElement &PathElement : Path) {
1374
      // If the path contains a virtual base class we can't give any hint.
1375
      // -1: no hint.
1376
75
      if (PathElement.Base->isVirtual())
1377
9
        return CharUnits::fromQuantity(-1ULL);
1378
1379
66
      if (NumPublicPaths > 1) // Won't use offsets, skip computation.
1380
9
        continue;
1381
1382
      // Accumulate the base class offsets.
1383
57
      const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
1384
57
      Offset += L.getBaseClassOffset(
1385
57
          PathElement.Base->getType()->getAsCXXRecordDecl());
1386
57
    }
1387
48
  }
1388
1389
  // -2: Src is not a public base of Dst.
1390
42
  if (NumPublicPaths == 0)
1391
9
    return CharUnits::fromQuantity(-2ULL);
1392
1393
  // -3: Src is a multiple public base type but never a virtual base type.
1394
33
  if (NumPublicPaths > 1)
1395
0
    return CharUnits::fromQuantity(-3ULL);
1396
1397
  // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
1398
  // Return the offset of Src from the origin of Dst.
1399
33
  return Offset;
1400
33
}
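// --- Illustrative sketch of the hint values computed above; all class names
// are made up.  The hint is handed to __dynamic_cast as 'src2dst_offset',
// with Src the static type of the cast operand and Dst the cast target.
struct S  { virtual ~S() {} };
struct A  : S { };
struct B  : S { };
struct D1 : A { };           // unique public non-virtual path: hint = offset of S within D1
struct D2 : virtual S { };   // virtual base on the path:       hint = -1 (no hint)
struct D3 : private S { };   // no public path:                 hint = -2
struct D4 : A, B { };        // multiple public paths:          hint = -3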
1401
1402
19
static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
1403
  // void __cxa_bad_typeid();
1404
19
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1405
1406
19
  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1407
19
}
1408
1409
bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
1410
28
                                              QualType SrcRecordTy) {
1411
28
  return IsDeref;
1412
28
}
1413
1414
19
void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1415
19
  llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
1416
19
  llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1417
19
  Call->setDoesNotReturn();
1418
19
  CGF.Builder.CreateUnreachable();
1419
19
}
1420
1421
llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
1422
                                       QualType SrcRecordTy,
1423
                                       Address ThisPtr,
1424
28
                                       llvm::Type *StdTypeInfoPtrTy) {
1425
28
  auto *ClassDecl =
1426
28
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
1427
28
  llvm::Value *Value =
1428
28
      CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);
1429
1430
28
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1431
    // Load the type info.
1432
1
    Value = CGF.Builder.CreateBitCast(Value, CGM.Int8PtrTy);
1433
1
    Value = CGF.Builder.CreateCall(
1434
1
        CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
1435
1
        {Value, llvm::ConstantInt::get(CGM.Int32Ty, -4)});
1436
1437
    // Setup to dereference again since this is a proxy we accessed.
1438
1
    Value = CGF.Builder.CreateBitCast(Value, StdTypeInfoPtrTy->getPointerTo());
1439
27
  } else {
1440
    // Load the type info.
1441
27
    Value =
1442
27
        CGF.Builder.CreateConstInBoundsGEP1_64(StdTypeInfoPtrTy, Value, -1ULL);
1443
27
  }
1444
28
  return CGF.Builder.CreateAlignedLoad(StdTypeInfoPtrTy, Value,
1445
28
                                       CGF.getPointerAlign());
1446
28
}
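// --- Illustrative sketch of why shouldTypeidBeNullChecked() is true only for
// a dereference; 'Shape' and 'probe' are made-up names.
#include <typeinfo>
struct Shape { virtual ~Shape() {} };
const std::type_info &probe(Shape *p) {
  return typeid(*p);   // if p is null, call __cxa_bad_typeid() to raise
}                      // std::bad_typeid instead of dereferencing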
1447
1448
bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
1449
59
                                                       QualType SrcRecordTy) {
1450
59
  return SrcIsPtr;
1451
59
}
1452
1453
llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
1454
    CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
1455
56
    QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
1456
56
  llvm::Type *PtrDiffLTy =
1457
56
      CGF.ConvertType(CGF.getContext().getPointerDiffType());
1458
56
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1459
1460
56
  llvm::Value *SrcRTTI =
1461
56
      CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
1462
56
  llvm::Value *DestRTTI =
1463
56
      CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
1464
1465
  // Compute the offset hint.
1466
56
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
1467
56
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
1468
56
  llvm::Value *OffsetHint = llvm::ConstantInt::get(
1469
56
      PtrDiffLTy,
1470
56
      computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());
1471
1472
  // Emit the call to __dynamic_cast.
1473
56
  llvm::Value *Value = ThisAddr.getPointer();
1474
56
  Value = CGF.EmitCastToVoidPtr(Value);
1475
1476
56
  llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
1477
56
  Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
1478
56
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);
1479
1480
  /// C++ [expr.dynamic.cast]p9:
1481
  ///   A failed cast to reference type throws std::bad_cast
1482
56
  if (DestTy->isReferenceType()) {
1483
8
    llvm::BasicBlock *BadCastBlock =
1484
8
        CGF.createBasicBlock("dynamic_cast.bad_cast");
1485
1486
8
    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
1487
8
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);
1488
1489
8
    CGF.EmitBlock(BadCastBlock);
1490
8
    EmitBadCastCall(CGF);
1491
8
  }
1492
1493
56
  return Value;
1494
56
}
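// --- Illustrative sketch of the two failure behaviours distinguished above;
// the types and functions are made up.
struct Base    { virtual ~Base() {} };
struct Derived : Base { };
Derived *asPointer(Base *b) {
  return dynamic_cast<Derived *>(b);   // failure yields a null pointer
}
Derived &asReference(Base &b) {
  return dynamic_cast<Derived &>(b);   // failure calls __cxa_bad_cast and
}                                      // throws std::bad_cast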
1495
1496
llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
1497
                                                  Address ThisAddr,
1498
                                                  QualType SrcRecordTy,
1499
3
                                                  QualType DestTy) {
1500
3
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1501
3
  auto *ClassDecl =
1502
3
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
1503
3
  llvm::Value *OffsetToTop;
1504
3
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1505
    // Get the vtable pointer.
1506
1
    llvm::Value *VTable =
1507
1
        CGF.GetVTablePtr(ThisAddr, CGM.Int32Ty->getPointerTo(), ClassDecl);
1508
1509
    // Get the offset-to-top from the vtable.
1510
1
    OffsetToTop =
1511
1
        CGF.Builder.CreateConstInBoundsGEP1_32(CGM.Int32Ty, VTable, -2U);
1512
1
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
1513
1
        CGM.Int32Ty, OffsetToTop, CharUnits::fromQuantity(4), "offset.to.top");
1514
2
  } else {
1515
2
    llvm::Type *PtrDiffLTy =
1516
2
        CGF.ConvertType(CGF.getContext().getPointerDiffType());
1517
1518
    // Get the vtable pointer.
1519
2
    llvm::Value *VTable =
1520
2
        CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(), ClassDecl);
1521
1522
    // Get the offset-to-top from the vtable.
1523
2
    OffsetToTop =
1524
2
        CGF.Builder.CreateConstInBoundsGEP1_64(PtrDiffLTy, VTable, -2ULL);
1525
2
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
1526
2
        PtrDiffLTy, OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
1527
2
  }
1528
  // Finally, add the offset to the pointer.
1529
3
  llvm::Value *Value = ThisAddr.getPointer();
1530
3
  Value = CGF.EmitCastToVoidPtr(Value);
1531
3
  Value = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, Value, OffsetToTop);
1532
3
  return CGF.Builder.CreateBitCast(Value, DestLTy);
1533
3
}
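// --- Illustrative sketch: a cast to void* returns a pointer to the
// most-derived object, which is exactly the offset-to-top adjustment loaded
// from vtable slot -2 above.  Names are made up.
struct Poly { virtual ~Poly() {} };
void *mostDerived(Poly *p) {
  return dynamic_cast<void *>(p);
}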
1534
1535
9
bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1536
9
  llvm::FunctionCallee Fn = getBadCastFn(CGF);
1537
9
  llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1538
9
  Call->setDoesNotReturn();
1539
9
  CGF.Builder.CreateUnreachable();
1540
9
  return true;
1541
9
}
1542
1543
llvm::Value *
1544
ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
1545
                                         Address This,
1546
                                         const CXXRecordDecl *ClassDecl,
1547
517
                                         const CXXRecordDecl *BaseClassDecl) {
1548
517
  llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
1549
517
  CharUnits VBaseOffsetOffset =
1550
517
      CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
1551
517
                                                               BaseClassDecl);
1552
517
  llvm::Value *VBaseOffsetPtr =
1553
517
    CGF.Builder.CreateConstGEP1_64(
1554
517
        CGF.Int8Ty, VTablePtr, VBaseOffsetOffset.getQuantity(),
1555
517
        "vbase.offset.ptr");
1556
1557
517
  llvm::Value *VBaseOffset;
1558
517
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1559
3
    VBaseOffsetPtr =
1560
3
        CGF.Builder.CreateBitCast(VBaseOffsetPtr, CGF.Int32Ty->getPointerTo());
1561
3
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
1562
3
        CGF.Int32Ty, VBaseOffsetPtr, CharUnits::fromQuantity(4),
1563
3
        "vbase.offset");
1564
514
  } else {
1565
514
    VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
1566
514
                                               CGM.PtrDiffTy->getPointerTo());
1567
514
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
1568
514
        CGM.PtrDiffTy, VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
1569
514
  }
1570
517
  return VBaseOffset;
1571
517
}
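// --- Illustrative sketch of a conversion that needs the vbase offset loaded
// above; 'V', 'D' and 'toVirtualBase' are made-up names.
struct V { int value = 0; };
struct D : virtual V { };
V *toVirtualBase(D *d) {
  return d;   // reads 'vbase.offset' for V out of D's vtable at runtime
}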
1572
1573
68.9k
void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
1574
  // Just make sure we're in sync with TargetCXXABI.
1575
68.9k
  assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
1576
1577
  // The constructor used for constructing this as a base class;
1578
  // ignores virtual bases.
1579
0
  CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
1580
1581
  // The constructor used for constructing this as a complete class;
1582
  // constructs the virtual bases, then calls the base constructor.
1583
68.9k
  if (!D->getParent()->isAbstract()) {
1584
    // We don't need to emit the complete ctor if the class is abstract.
1585
67.8k
    CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
1586
67.8k
  }
1587
68.9k
}
1588
1589
CGCXXABI::AddedStructorArgCounts
1590
ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
1591
288k
                                      SmallVectorImpl<CanQualType> &ArgTys) {
1592
288k
  ASTContext &Context = getContext();
1593
1594
  // All parameters are already in place except VTT, which goes after 'this'.
1595
  // These are Clang types, so we don't need to worry about sret yet.
1596
1597
  // Check if we need to add a VTT parameter (which has type void **).
1598
288k
  if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
1599
288k
                                             : GD.getDtorType() == Dtor_Base) &&
1600
288k
      cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
1601
1.17k
    ArgTys.insert(ArgTys.begin() + 1,
1602
1.17k
                  Context.getPointerType(Context.VoidPtrTy));
1603
1.17k
    return AddedStructorArgCounts::prefix(1);
1604
1.17k
  }
1605
287k
  return AddedStructorArgCounts{};
1606
288k
}
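// --- Illustrative sketch of a hierarchy whose base-object constructors and
// destructors receive the extra VTT parameter (void**) inserted above; the
// names are made up.
struct V { virtual ~V() {} };
struct A : virtual V { };
struct B : virtual V { };
struct C : A, B { };   // while constructing a C, the base-object structors of
                       // A and B receive a VTT pointer for the C object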
1607
1608
9.39k
void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
1609
  // The destructor used for destructing this as a base class; ignores
1610
  // virtual bases.
1611
9.39k
  CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
1612
1613
  // The destructor used for destructing this as a most-derived class;
1614
  // it calls the base destructor and then destroys any virtual bases.
1615
9.39k
  CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));
1616
1617
  // The destructor in a virtual table is always a 'deleting'
1618
  // destructor, which calls the complete destructor and then uses the
1619
  // appropriate operator delete.
1620
9.39k
  if (D->isVirtual())
1621
889
    CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
1622
9.39k
}
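// --- Illustrative sketch of the variants emitted above for one class; the
// class name is made up and the mangled names follow the usual Itanium
// scheme.
struct Gadget {
  Gadget();            // _ZN6GadgetC2Ev (base) and _ZN6GadgetC1Ev (complete)
  virtual ~Gadget();   // _ZN6GadgetD2Ev, _ZN6GadgetD1Ev and, because it is
                       // virtual, the deleting variant _ZN6GadgetD0Ev
};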
1623
1624
void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
1625
                                              QualType &ResTy,
1626
55.1k
                                              FunctionArgList &Params) {
1627
55.1k
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
1628
55.1k
  assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));
1629
1630
  // Check if we need a VTT parameter as well.
1631
55.1k
  if (NeedsVTTParameter(CGF.CurGD)) {
1632
264
    ASTContext &Context = getContext();
1633
1634
    // FIXME: avoid the fake decl
1635
264
    QualType T = Context.getPointerType(Context.VoidPtrTy);
1636
264
    auto *VTTDecl = ImplicitParamDecl::Create(
1637
264
        Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
1638
264
        T, ImplicitParamDecl::CXXVTT);
1639
264
    Params.insert(Params.begin() + 1, VTTDecl);
1640
264
    getStructorImplicitParamDecl(CGF) = VTTDecl;
1641
264
  }
1642
55.1k
}
1643
1644
100k
void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
1645
  // Naked functions have no prolog.
1646
100k
  if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
1647
1
    return;
1648
1649
  /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
1650
  /// adjustments are required, because they are all handled by thunks.
1651
100k
  setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));
1652
1653
  /// Initialize the 'vtt' slot if needed.
1654
100k
  if (getStructorImplicitParamDecl(CGF)) {
1655
264
    getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
1656
264
        CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
1657
264
  }
1658
1659
  /// If this is a function that the ABI specifies returns 'this', initialize
1660
  /// the return slot to 'this' at the start of the function.
1661
  ///
1662
  /// Unlike the setting of return types, this is done within the ABI
1663
  /// implementation instead of by clients of CGCXXABI because:
1664
  /// 1) getThisValue is currently protected
1665
  /// 2) in theory, an ABI could implement 'this' returns some other way;
1666
  ///    HasThisReturn only specifies a contract, not the implementation
1667
100k
  if (HasThisReturn(CGF.CurGD))
1668
393
    CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
1669
100k
}
1670
1671
CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
1672
    CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
1673
53.2k
    bool ForVirtualBase, bool Delegating) {
1674
53.2k
  if (!NeedsVTTParameter(GlobalDecl(D, Type)))
1675
53.1k
    return AddedStructorArgs{};
1676
1677
  // Insert the implicit 'vtt' argument as the second argument.
1678
134
  llvm::Value *VTT =
1679
134
      CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
1680
134
  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1681
134
  return AddedStructorArgs::prefix({{VTT, VTTTy}});
1682
53.2k
}
1683
1684
llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam(
1685
    CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
1686
29.0k
    bool ForVirtualBase, bool Delegating) {
1687
29.0k
  GlobalDecl GD(DD, Type);
1688
29.0k
  return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
1689
29.0k
}
1690
1691
void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
1692
                                       const CXXDestructorDecl *DD,
1693
                                       CXXDtorType Type, bool ForVirtualBase,
1694
                                       bool Delegating, Address This,
1695
29.0k
                                       QualType ThisTy) {
1696
29.0k
  GlobalDecl GD(DD, Type);
1697
29.0k
  llvm::Value *VTT =
1698
29.0k
      getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
1699
29.0k
  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1700
1701
29.0k
  CGCallee Callee;
1702
29.0k
  if (getContext().getLangOpts().AppleKext &&
1703
29.0k
      Type != Dtor_Base && DD->isVirtual())
1704
7
    Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
1705
29.0k
  else
1706
29.0k
    Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);
1707
1708
29.0k
  CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy,
1709
29.0k
                            nullptr);
1710
29.0k
}
1711
1712
void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
1713
2.43k
                                          const CXXRecordDecl *RD) {
1714
2.43k
  llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
1715
2.43k
  if (VTable->hasInitializer())
1716
799
    return;
1717
1718
1.63k
  ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
1719
1.63k
  const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
1720
1.63k
  llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
1721
1.63k
  llvm::Constant *RTTI =
1722
1.63k
      CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));
1723
1724
  // Create and set the initializer.
1725
1.63k
  ConstantInitBuilder builder(CGM);
1726
1.63k
  auto components = builder.beginStruct();
1727
1.63k
  CGVT.createVTableInitializer(components, VTLayout, RTTI,
1728
1.63k
                               llvm::GlobalValue::isLocalLinkage(Linkage));
1729
1.63k
  components.finishAndSetAsInitializer(VTable);
1730
1731
  // Set the correct linkage.
1732
1.63k
  VTable->setLinkage(Linkage);
1733
1734
1.63k
  if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
1735
369
    VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));
1736
1737
  // Set the right visibility.
1738
1.63k
  CGM.setGVProperties(VTable, RD);
1739
1740
  // If this is the magic class __cxxabiv1::__fundamental_type_info,
1741
  // we will emit the typeinfo for the fundamental types. This is the
1742
  // same behaviour as GCC.
1743
1.63k
  const DeclContext *DC = RD->getDeclContext();
1744
1.63k
  if (RD->getIdentifier() &&
1745
1.63k
      RD->getIdentifier()->isStr("__fundamental_type_info") &&
1746
1.63k
      isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
1747
1.63k
      cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
1748
1.63k
      DC->getParent()->isTranslationUnit())
1749
10
    EmitFundamentalRTTIDescriptors(RD);
1750
1751
  // Always emit type metadata on non-available_externally definitions, and on
1752
  // available_externally definitions if we are performing whole program
1753
  // devirtualization. For WPD we need the type metadata on all vtable
1754
  // definitions to ensure we associate derived classes with base classes
1755
  // defined in headers but with a strong definition only in a shared library.
1756
1.63k
  if (!VTable->isDeclarationForLinker() ||
1757
1.63k
      CGM.getCodeGenOpts().WholeProgramVTables) {
1758
1.49k
    CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
1759
    // For available_externally definitions, add the vtable to
1760
    // @llvm.compiler.used so that it isn't deleted before whole program
1761
    // analysis.
1762
1.49k
    if (VTable->isDeclarationForLinker()) {
1763
1
      assert(CGM.getCodeGenOpts().WholeProgramVTables);
1764
0
      CGM.addCompilerUsedGlobal(VTable);
1765
1
    }
1766
1.49k
  }
1767
1768
1.63k
  if (VTContext.isRelativeLayout() && !VTable->isDSOLocal())
1769
49
    CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
1770
1.63k
}
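// --- Illustrative sketch of the "magic class" special case above: defining
// the destructor of __cxxabiv1::__fundamental_type_info in a TU is the idiom
// C++ runtime libraries use so that exactly one TU emits the type_info
// objects for the fundamental types.  This is only a sketch of that idiom,
// not something ordinary code should define.
#include <typeinfo>
namespace __cxxabiv1 {
class __fundamental_type_info : public std::type_info {
public:
  virtual ~__fundamental_type_info();
};
__fundamental_type_info::~__fundamental_type_info() {}   // triggers
}                                                         // EmitFundamentalRTTIDescriptors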
1771
1772
bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
1773
2.33k
    CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
1774
2.33k
  if (Vptr.NearestVBase == nullptr)
1775
2.08k
    return false;
1776
250
  return NeedsVTTParameter(CGF.CurGD);
1777
2.33k
}
1778
1779
llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
1780
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1781
2.33k
    const CXXRecordDecl *NearestVBase) {
1782
1783
2.33k
  if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1784
2.33k
      NeedsVTTParameter(CGF.CurGD)) {
1785
307
    return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
1786
307
                                                  NearestVBase);
1787
307
  }
1788
2.02k
  return getVTableAddressPoint(Base, VTableClass);
1789
2.33k
}
1790
1791
llvm::Constant *
1792
ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
1793
2.32k
                                     const CXXRecordDecl *VTableClass) {
1794
2.32k
  llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());
1795
1796
  // Find the appropriate vtable within the vtable group, and the address point
1797
  // within that vtable.
1798
2.32k
  VTableLayout::AddressPointLocation AddressPoint =
1799
2.32k
      CGM.getItaniumVTableContext()
1800
2.32k
          .getVTableLayout(VTableClass)
1801
2.32k
          .getAddressPoint(Base);
1802
2.32k
  llvm::Value *Indices[] = {
1803
2.32k
    llvm::ConstantInt::get(CGM.Int32Ty, 0),
1804
2.32k
    llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
1805
2.32k
    llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
1806
2.32k
  };
1807
1808
2.32k
  return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
1809
2.32k
                                              Indices, /*InBounds=*/true,
1810
2.32k
                                              /*InRangeIndex=*/1);
1811
2.32k
}
1812
1813
// Check whether all the non-inline virtual methods for the class have the
1814
// specified attribute.
1815
template <typename T>
1816
47
static bool CXXRecordAllNonInlineVirtualsHaveAttr(const CXXRecordDecl *RD) {
1817
47
  bool FoundNonInlineVirtualMethodWithAttr = false;
1818
285
  for (const auto *D : RD->noload_decls()) {
1819
285
    if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
1820
224
      if (!FD->isVirtualAsWritten() || FD->isInlineSpecified() ||
1821
224
          FD->doesThisDeclarationHaveABody())
1822
162
        continue;
1823
62
      if (!D->hasAttr<T>())
1824
20
        return false;
1825
42
      FoundNonInlineVirtualMethodWithAttr = true;
1826
42
    }
1827
285
  }
1828
1829
  // We didn't find any non-inline virtual methods missing the attribute.  We
1830
  // will return true when we found at least one non-inline virtual with the
1831
  // attribute.  (This lets our caller know that the attribute needs to be
1832
  // propagated up to the vtable.)
1833
27
  return FoundNonInlineVirtualMethodWithAttr;
1834
47
}
ItaniumCXXABI.cpp:bool CXXRecordAllNonInlineVirtualsHaveAttr<clang::DLLImportAttr>(clang::CXXRecordDecl const*)
Line
Count
Source
1816
23
static bool CXXRecordAllNonInlineVirtualsHaveAttr(const CXXRecordDecl *RD) {
1817
23
  bool FoundNonInlineVirtualMethodWithAttr = false;
1818
121
  for (const auto *D : RD->noload_decls()) {
1819
121
    if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
1820
98
      if (!FD->isVirtualAsWritten() || FD->isInlineSpecified() ||
1821
98
          FD->doesThisDeclarationHaveABody())
1822
60
        continue;
1823
38
      if (!D->hasAttr<T>())
1824
14
        return false;
1825
24
      FoundNonInlineVirtualMethodWithAttr = true;
1826
24
    }
1827
121
  }
1828
1829
  // We didn't find any non-inline virtual methods missing the attribute.  We
1830
  // will return true when we found at least one non-inline virtual with the
1831
  // attribute.  (This lets our caller know that the attribute needs to be
1832
  // propagated up to the vtable.)
1833
9
  return FoundNonInlineVirtualMethodWithAttr;
1834
23
}
ItaniumCXXABI.cpp:bool CXXRecordAllNonInlineVirtualsHaveAttr<clang::DLLExportAttr>(clang::CXXRecordDecl const*)
Line
Count
Source
1816
24
static bool CXXRecordAllNonInlineVirtualsHaveAttr(const CXXRecordDecl *RD) {
1817
24
  bool FoundNonInlineVirtualMethodWithAttr = false;
1818
164
  for (const auto *D : RD->noload_decls()) {
1819
164
    if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
1820
126
      if (!FD->isVirtualAsWritten() || FD->isInlineSpecified() ||
1821
126
          FD->doesThisDeclarationHaveABody())
1822
102
        continue;
1823
24
      if (!D->hasAttr<T>())
1824
6
        return false;
1825
18
      FoundNonInlineVirtualMethodWithAttr = true;
1826
18
    }
1827
164
  }
1828
1829
  // We didn't find any non-inline virtual methods missing the attribute.  We
1830
  // will return true when we found at least one non-inline virtual with the
1831
  // attribute.  (This lets our caller know that the attribute needs to be
1832
  // propagated up to the vtable.)
1833
18
  return FoundNonInlineVirtualMethodWithAttr;
1834
24
}
1835
1836
llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
1837
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1838
307
    const CXXRecordDecl *NearestVBase) {
1839
307
  assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1840
307
         NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");
1841
1842
  // Get the secondary vpointer index.
1843
0
  uint64_t VirtualPointerIndex =
1844
307
      CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
1845
1846
  /// Load the VTT.
1847
307
  llvm::Value *VTT = CGF.LoadCXXVTT();
1848
307
  if (VirtualPointerIndex)
1849
108
    VTT = CGF.Builder.CreateConstInBoundsGEP1_64(
1850
108
        CGF.VoidPtrTy, VTT, VirtualPointerIndex);
1851
1852
  // And load the address point from the VTT.
1853
307
  return CGF.Builder.CreateAlignedLoad(CGF.VoidPtrTy, VTT,
1854
307
                                       CGF.getPointerAlign());
1855
307
}
1856
1857
llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
1858
240
    BaseSubobject Base, const CXXRecordDecl *VTableClass) {
1859
240
  return getVTableAddressPoint(Base, VTableClass);
1860
240
}
1861
1862
llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
1863
5.76k
                                                     CharUnits VPtrOffset) {
1864
5.76k
  assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");
1865
1866
0
  llvm::GlobalVariable *&VTable = VTables[RD];
1867
5.76k
  if (VTable)
1868
3.60k
    return VTable;
1869
1870
  // Queue up this vtable for possible deferred emission.
1871
2.15k
  CGM.addDeferredVTable(RD);
1872
1873
2.15k
  SmallString<256> Name;
1874
2.15k
  llvm::raw_svector_ostream Out(Name);
1875
2.15k
  getMangleContext().mangleCXXVTable(RD, Out);
1876
1877
2.15k
  const VTableLayout &VTLayout =
1878
2.15k
      CGM.getItaniumVTableContext().getVTableLayout(RD);
1879
2.15k
  llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);
1880
1881
  // Use pointer alignment for the vtable. Otherwise we would align them based
1882
  // on the size of the initializer which doesn't make sense as only single
1883
  // values are read.
1884
2.15k
  unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
1885
2.15k
                        ? 32
1886
2.15k
                        : CGM.getTarget().getPointerAlign(0);
1887
1888
2.15k
  VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
1889
2.15k
      Name, VTableType, llvm::GlobalValue::ExternalLinkage,
1890
2.15k
      getContext().toCharUnitsFromBits(PAlign).getQuantity());
1891
2.15k
  VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1892
1893
  // In MS C++, if a class with virtual functions uses selective member
1894
  // import/export, then all non-inline virtual functions must be exported;
1895
  // otherwise a link error will result. To match this
1896
  // behavior, for such classes, we dllimport the vtable if it is defined
1897
  // externally and all the non-inline virtual methods are marked dllimport, and
1898
  // we dllexport the vtable if it is defined in this TU and all the non-inline
1899
  // virtual methods are marked dllexport.
1900
2.15k
  if (CGM.getTarget().hasPS4DLLImportExport()) {
1901
29
    if ((!RD->hasAttr<DLLImportAttr>()) && (!RD->hasAttr<DLLExportAttr>())) {
1902
18
      if (CGM.getVTables().isVTableExternal(RD)) {
1903
8
        if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD))
1904
3
          VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
1905
10
      } else {
1906
10
        if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD))
1907
3
          VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
1908
10
      }
1909
18
    }
1910
29
  }
1911
2.15k
  CGM.setGVProperties(VTable, RD);
1912
1913
2.15k
  return VTable;
1914
5.76k
}
1915
1916
CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
1917
                                                  GlobalDecl GD,
1918
                                                  Address This,
1919
                                                  llvm::Type *Ty,
1920
824
                                                  SourceLocation Loc) {
1921
824
  llvm::Type *TyPtr = Ty->getPointerTo();
1922
824
  auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
1923
824
  llvm::Value *VTable = CGF.GetVTablePtr(
1924
824
      This, TyPtr->getPointerTo(), MethodDecl->getParent());
1925
1926
824
  uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
1927
824
  llvm::Value *VFunc;
1928
824
  if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
1929
8
    VFunc = CGF.EmitVTableTypeCheckedLoad(
1930
8
        MethodDecl->getParent(), VTable, TyPtr,
1931
8
        VTableIndex * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
1932
816
  } else {
1933
816
    CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);
1934
1935
816
    llvm::Value *VFuncLoad;
1936
816
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1937
18
      VTable = CGF.Builder.CreateBitCast(VTable, CGM.Int8PtrTy);
1938
18
      llvm::Value *Load = CGF.Builder.CreateCall(
1939
18
          CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
1940
18
          {VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
1941
18
      VFuncLoad = CGF.Builder.CreateBitCast(Load, TyPtr);
1942
798
    } else {
1943
798
      VTable =
1944
798
          CGF.Builder.CreateBitCast(VTable, TyPtr->getPointerTo());
1945
798
      llvm::Value *VTableSlotPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
1946
798
          TyPtr, VTable, VTableIndex, "vfn");
1947
798
      VFuncLoad =
1948
798
          CGF.Builder.CreateAlignedLoad(TyPtr, VTableSlotPtr,
1949
798
                                        CGF.getPointerAlign());
1950
798
    }
1951
1952
    // Add !invariant.load metadata to the virtual function load to indicate
1953
    // that the function pointer does not change inside the vtable.
1954
    // It is safe to add it without -fstrict-vtable-pointers, but it would not
1955
    // help devirtualization, because it only matters when there are two loads
1956
    // of the same virtual function from the same vtable load, which won't
1957
    // happen without devirtualization enabled by -fstrict-vtable-pointers.
1958
816
    if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1959
816
        CGM.getCodeGenOpts().StrictVTablePointers) {
1960
44
      if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(VFuncLoad)) {
1961
44
        VFuncLoadInstr->setMetadata(
1962
44
            llvm::LLVMContext::MD_invariant_load,
1963
44
            llvm::MDNode::get(CGM.getLLVMContext(),
1964
44
                              llvm::ArrayRef<llvm::Metadata *>()));
1965
44
      }
1966
44
    }
1967
816
    VFunc = VFuncLoad;
1968
816
  }
1969
1970
824
  CGCallee Callee(GD, VFunc);
1971
824
  return Callee;
1972
824
}
1973
1974
llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
1975
    CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
1976
31
    Address This, DeleteOrMemberCallExpr E) {
1977
31
  auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
1978
31
  auto *D = E.dyn_cast<const CXXDeleteExpr *>();
1979
31
  assert((CE != nullptr) ^ (D != nullptr));
1980
0
  assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
1981
0
  assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
1982
1983
0
  GlobalDecl GD(Dtor, DtorType);
1984
31
  const CGFunctionInfo *FInfo =
1985
31
      &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
1986
31
  llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
1987
31
  CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);
1988
1989
31
  QualType ThisTy;
1990
31
  if (CE) {
1991
8
    ThisTy = CE->getObjectType();
1992
23
  } else {
1993
23
    ThisTy = D->getDestroyedType();
1994
23
  }
1995
1996
31
  CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, nullptr,
1997
31
                            QualType(), nullptr);
1998
31
  return nullptr;
1999
31
}
2000
2001
373
void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
2002
373
  CodeGenVTables &VTables = CGM.getVTables();
2003
373
  llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
2004
373
  VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
2005
373
}
2006
2007
bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
2008
442
    const CXXRecordDecl *RD) const {
2009
  // We don't emit available_externally vtables if we are in -fapple-kext mode
2010
  // because kext mode does not permit devirtualization.
2011
442
  if (CGM.getLangOpts().AppleKext)
2012
0
    return false;
2013
2014
  // If the vtable is hidden then it is not safe to emit an available_externally
2015
  // copy of vtable.
2016
442
  if (isVTableHidden(RD))
2017
19
    return false;
2018
2019
423
  if (CGM.getCodeGenOpts().ForceEmitVTables)
2020
52
    return true;
2021
2022
  // If we don't have any not emitted inline virtual function then we are safe
2023
  // to emit an available_externally copy of vtable.
2024
  // FIXME we can still emit a copy of the vtable if we
2025
  // can emit definition of the inline functions.
2026
371
  if (hasAnyUnusedVirtualInlineFunction(RD))
2027
147
    return false;
2028
2029
  // For a class with virtual bases, we must also be able to speculatively
2030
  // emit the VTT, because CodeGen doesn't have separate notions of "can emit
2031
  // the vtable" and "can emit the VTT". For a base subobject, this means we
2032
  // need to be able to emit non-virtual base vtables.
2033
224
  if (RD->getNumVBases()) {
2034
43
    for (const auto &B : RD->bases()) {
2035
43
      auto *BRD = B.getType()->getAsCXXRecordDecl();
2036
43
      assert(BRD && "no class for base specifier");
2037
43
      if (B.isVirtual() || !BRD->isDynamicClass())
2038
20
        continue;
2039
23
      if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2040
8
        return false;
2041
23
    }
2042
37
  }
2043
2044
216
  return true;
2045
224
}
2046
2047
406
bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
2048
406
  if (!canSpeculativelyEmitVTableAsBaseClass(RD))
2049
166
    return false;
2050
2051
  // For a complete-object vtable (or more specifically, for the VTT), we need
2052
  // to be able to speculatively emit the vtables of all dynamic virtual bases.
2053
240
  for (const auto &B : RD->vbases()) {
2054
18
    auto *BRD = B.getType()->getAsCXXRecordDecl();
2055
18
    assert(BRD && "no class for base specifier");
2056
18
    if (!BRD->isDynamicClass())
2057
5
      continue;
2058
13
    if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2059
0
      return false;
2060
13
  }
2061
2062
240
  return true;
2063
240
}
2064
static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
2065
                                          Address InitialPtr,
2066
                                          int64_t NonVirtualAdjustment,
2067
                                          int64_t VirtualAdjustment,
2068
367
                                          bool IsReturnAdjustment) {
2069
367
  if (!NonVirtualAdjustment && !VirtualAdjustment)
2070
13
    return InitialPtr.getPointer();
2071
2072
354
  Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);
2073
2074
  // In a base-to-derived cast, the non-virtual adjustment is applied first.
2075
354
  if (NonVirtualAdjustment && !IsReturnAdjustment) {
2076
147
    V = CGF.Builder.CreateConstInBoundsByteGEP(V,
2077
147
                              CharUnits::fromQuantity(NonVirtualAdjustment));
2078
147
  }
2079
2080
  // Perform the virtual adjustment if we have one.
2081
354
  llvm::Value *ResultPtr;
2082
354
  if (VirtualAdjustment) {
2083
210
    Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
2084
210
    llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
2085
2086
210
    llvm::Value *Offset;
2087
210
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
2088
210
        CGF.Int8Ty, VTablePtr, VirtualAdjustment);
2089
210
    if (CGF.CGM.getItaniumVTableContext().isRelativeLayout()) {
2090
      // Load the adjustment offset from the vtable as a 32-bit int.
2091
6
      OffsetPtr =
2092
6
          CGF.Builder.CreateBitCast(OffsetPtr, CGF.Int32Ty->getPointerTo());
2093
6
      Offset =
2094
6
          CGF.Builder.CreateAlignedLoad(CGF.Int32Ty, OffsetPtr,
2095
6
                                        CharUnits::fromQuantity(4));
2096
204
    } else {
2097
204
      llvm::Type *PtrDiffTy =
2098
204
          CGF.ConvertType(CGF.getContext().getPointerDiffType());
2099
2100
204
      OffsetPtr =
2101
204
          CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
2102
2103
      // Load the adjustment offset from the vtable.
2104
204
      Offset = CGF.Builder.CreateAlignedLoad(PtrDiffTy, OffsetPtr,
2105
204
                                             CGF.getPointerAlign());
2106
204
    }
2107
    // Adjust our pointer.
2108
210
    ResultPtr = CGF.Builder.CreateInBoundsGEP(
2109
210
        V.getElementType(), V.getPointer(), Offset);
2110
210
  } else {
2111
144
    ResultPtr = V.getPointer();
2112
144
  }
2113
2114
  // In a derived-to-base conversion, the non-virtual adjustment is
2115
  // applied second.
2116
354
  if (NonVirtualAdjustment && IsReturnAdjustment) {
2117
11
    ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.Int8Ty, ResultPtr,
2118
11
                                                       NonVirtualAdjustment);
2119
11
  }
2120
2121
  // Cast back to the original type.
2122
354
  return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
2123
367
}
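// --- Illustrative sketch of an override that needs the non-virtual 'this'
// adjustment performed above; the names are made up.  Calls through a B*
// enter a thunk that shifts 'this' from the B subobject back to the start of
// the D object before entering D::g.
struct A { virtual void f() {} };
struct B { virtual void g() {} };
struct D : A, B {
  void g() override {}
};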
2124
2125
llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
2126
                                                  Address This,
2127
338
                                                  const ThisAdjustment &TA) {
2128
338
  return performTypeAdjustment(CGF, This, TA.NonVirtual,
2129
338
                               TA.Virtual.Itanium.VCallOffsetOffset,
2130
338
                               /*IsReturnAdjustment=*/false);
2131
338
}
2132
2133
llvm::Value *
2134
ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
2135
29
                                       const ReturnAdjustment &RA) {
2136
29
  return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
2137
29
                               RA.Virtual.Itanium.VBaseOffsetOffset,
2138
29
                               /*IsReturnAdjustment=*/true);
2139
29
}
2140
2141
void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
2142
6
                                    RValue RV, QualType ResultType) {
2143
6
  if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
2144
0
    return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
2145
2146
  // Destructor thunks in the ARM ABI have indeterminate results.
2147
6
  llvm::Type *T = CGF.ReturnValue.getElementType();
2148
6
  RValue Undef = RValue::get(llvm::UndefValue::get(T));
2149
6
  return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
2150
6
}
2151
2152
/************************** Array allocation cookies **************************/
2153
2154
172
CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2155
  // The array cookie is a size_t; pad that up to the element alignment.
2156
  // The cookie is actually right-justified in that space.
2157
172
  return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
2158
172
                  CGM.getContext().getPreferredTypeAlignInChars(elementType));
2159
172
}
2160
2161
Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2162
                                             Address NewPtr,
2163
                                             llvm::Value *NumElements,
2164
                                             const CXXNewExpr *expr,
2165
42
                                             QualType ElementType) {
2166
42
  assert(requiresArrayCookie(expr));
2167
2168
0
  unsigned AS = NewPtr.getAddressSpace();
2169
2170
42
  ASTContext &Ctx = getContext();
2171
42
  CharUnits SizeSize = CGF.getSizeSize();
2172
2173
  // The size of the cookie.
2174
42
  CharUnits CookieSize =
2175
42
      std::max(SizeSize, Ctx.getPreferredTypeAlignInChars(ElementType));
2176
42
  assert(CookieSize == getArrayCookieSizeImpl(ElementType));
2177
2178
  // Compute an offset to the cookie.
2179
0
  Address CookiePtr = NewPtr;
2180
42
  CharUnits CookieOffset = CookieSize - SizeSize;
2181
42
  if (!CookieOffset.isZero())
2182
5
    CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);
2183
2184
  // Write the number of elements into the appropriate slot.
2185
42
  Address NumElementsPtr =
2186
42
      CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
2187
42
  llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);
2188
2189
  // Handle the array cookie specially in ASan.
2190
42
  if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
2191
42
      (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
2192
8
       CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
2193
    // The store to the CookiePtr does not need to be instrumented.
2194
6
    CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
2195
6
    llvm::FunctionType *FTy =
2196
6
        llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
2197
6
    llvm::FunctionCallee F =
2198
6
        CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
2199
6
    CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
2200
6
  }
2201
2202
  // Finally, compute a pointer to the actual data buffer by skipping
2203
  // over the cookie completely.
2204
42
  return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
2205
42
}
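// --- Illustrative sketch of an allocation that gets the cookie written
// above; 'Item' and 'make' are made-up names.  The user-provided destructor
// makes the cookie necessary so that delete[] can recover the element count.
struct Item {
  ~Item() {}
  int value = 0;
};
Item *make(unsigned n) {
  return new Item[n];   // layout: [size_t cookie = n][Item 0][Item 1]...
}                       // delete[] later reads it back via readArrayCookieImpl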
2206
2207
llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2208
                                                Address allocPtr,
2209
46
                                                CharUnits cookieSize) {
2210
  // The element size is right-justified in the cookie.
2211
46
  Address numElementsPtr = allocPtr;
2212
46
  CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
2213
46
  if (!numElementsOffset.isZero())
2214
5
    numElementsPtr =
2215
5
      CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);
2216
2217
46
  unsigned AS = allocPtr.getAddressSpace();
2218
46
  numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
2219
46
  if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
2220
44
    return CGF.Builder.CreateLoad(numElementsPtr);
2221
  // In asan mode emit a function call instead of a regular load and let the
2222
  // run-time deal with it: if the shadow is properly poisoned return the
2223
  // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
2224
  // We can't simply ignore this load using nosanitize metadata because
2225
  // the metadata may be lost.
2226
2
  llvm::FunctionType *FTy =
2227
2
      llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
2228
2
  llvm::FunctionCallee F =
2229
2
      CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
2230
2
  return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
2231
46
}
2232
2233
64
CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2234
  // ARM says that the cookie is always:
2235
  //   struct array_cookie {
2236
  //     std::size_t element_size; // element_size != 0
2237
  //     std::size_t element_count;
2238
  //   };
2239
  // But the base ABI doesn't give anything an alignment greater than
2240
  // 8, so we can dismiss this as typical ABI-author blindness to
2241
  // actual language complexity and round up to the element alignment.
2242
64
  return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
2243
64
                  CGM.getContext().getTypeAlignInChars(elementType));
2244
64
}
2245
2246
Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2247
                                         Address newPtr,
2248
                                         llvm::Value *numElements,
2249
                                         const CXXNewExpr *expr,
2250
18
                                         QualType elementType) {
2251
18
  assert(requiresArrayCookie(expr));
2252
2253
  // The cookie is always at the start of the buffer.
2254
0
  Address cookie = newPtr;
2255
2256
  // The first element is the element size.
2257
18
  cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
2258
18
  llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
2259
18
                 getContext().getTypeSizeInChars(elementType).getQuantity());
2260
18
  CGF.Builder.CreateStore(elementSize, cookie);
2261
2262
  // The second element is the element count.
2263
18
  cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1);
2264
18
  CGF.Builder.CreateStore(numElements, cookie);
2265
2266
  // Finally, compute a pointer to the actual data buffer by skipping
2267
  // over the cookie completely.
2268
18
  CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
2269
18
  return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
2270
18
}
2271
2272
llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2273
                                            Address allocPtr,
2274
10
                                            CharUnits cookieSize) {
2275
  // The number of elements is at offset sizeof(size_t) relative to
2276
  // the allocated pointer.
2277
10
  Address numElementsPtr
2278
10
    = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
2279
2280
10
  numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
2281
10
  return CGF.Builder.CreateLoad(numElementsPtr);
2282
10
}
2283
2284
/*********************** Static local initialization **************************/
2285
2286
static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
2287
171
                                              llvm::PointerType *GuardPtrTy) {
2288
  // int __cxa_guard_acquire(__guard *guard_object);
2289
171
  llvm::FunctionType *FTy =
2290
171
    llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
2291
171
                            GuardPtrTy, /*isVarArg=*/false);
2292
171
  return CGM.CreateRuntimeFunction(
2293
171
      FTy, "__cxa_guard_acquire",
2294
171
      llvm::AttributeList::get(CGM.getLLVMContext(),
2295
171
                               llvm::AttributeList::FunctionIndex,
2296
171
                               llvm::Attribute::NoUnwind));
2297
171
}
2298
2299
static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
2300
171
                                              llvm::PointerType *GuardPtrTy) {
2301
  // void __cxa_guard_release(__guard *guard_object);
2302
171
  llvm::FunctionType *FTy =
2303
171
    llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2304
171
  return CGM.CreateRuntimeFunction(
2305
171
      FTy, "__cxa_guard_release",
2306
171
      llvm::AttributeList::get(CGM.getLLVMContext(),
2307
171
                               llvm::AttributeList::FunctionIndex,
2308
171
                               llvm::Attribute::NoUnwind));
2309
171
}
2310
2311
static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
2312
30
                                            llvm::PointerType *GuardPtrTy) {
2313
  // void __cxa_guard_abort(__guard *guard_object);
2314
30
  llvm::FunctionType *FTy =
2315
30
    llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2316
30
  return CGM.CreateRuntimeFunction(
2317
30
      FTy, "__cxa_guard_abort",
2318
30
      llvm::AttributeList::get(CGM.getLLVMContext(),
2319
30
                               llvm::AttributeList::FunctionIndex,
2320
30
                               llvm::Attribute::NoUnwind));
2321
30
}
2322
2323
namespace {
2324
  struct CallGuardAbort final : EHScopeStack::Cleanup {
2325
    llvm::GlobalVariable *Guard;
2326
171
    CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
2327
2328
30
    void Emit(CodeGenFunction &CGF, Flags flags) override {
2329
30
      CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
2330
30
                                  Guard);
2331
30
    }
2332
  };
2333
}
2334
2335
/// The ARM code here follows the Itanium code closely enough that we
2336
/// just special-case it at particular places.
2337
void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
2338
                                    const VarDecl &D,
2339
                                    llvm::GlobalVariable *var,
2340
9.08k
                                    bool shouldPerformInit) {
2341
9.08k
  CGBuilderTy &Builder = CGF.Builder;
2342
2343
  // Inline variables that weren't instantiated from variable templates have
2344
  // partially-ordered initialization within their translation unit.
2345
9.08k
  bool NonTemplateInline =
2346
9.08k
      D.isInline() &&
2347
9.08k
      
!isTemplateInstantiation(D.getTemplateSpecializationKind())56
;
2348
2349
  // We only need to use thread-safe statics for local non-TLS variables and
2350
  // inline variables; other global initialization is always single-threaded
2351
  // or (through lazy dynamic loading in multiple threads) unsequenced.
2352
9.08k
  bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
2353
9.08k
                    (D.isLocalVarDecl() || NonTemplateInline) &&
2354
9.08k
                    !D.getTLSKind();
2355
2356
  // If we have a global variable with internal linkage and thread-safe statics
2357
  // are disabled, we can just let the guard variable be of type i8.
2358
9.08k
  bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();
2359
2360
9.08k
  llvm::IntegerType *guardTy;
2361
9.08k
  CharUnits guardAlignment;
2362
9.08k
  if (useInt8GuardVariable) {
2363
8.73k
    guardTy = CGF.Int8Ty;
2364
8.73k
    guardAlignment = CharUnits::One();
2365
8.73k
  } else {
2366
    // Guard variables are 64 bits in the generic ABI and size width on ARM
2367
    // (i.e. 32-bit on AArch32, 64-bit on AArch64).
2368
352
    if (UseARMGuardVarABI) {
2369
13
      guardTy = CGF.SizeTy;
2370
13
      guardAlignment = CGF.getSizeAlign();
2371
339
    } else {
2372
339
      guardTy = CGF.Int64Ty;
2373
339
      guardAlignment = CharUnits::fromQuantity(
2374
339
                             CGM.getDataLayout().getABITypeAlignment(guardTy));
2375
339
    }
2376
352
  }
2377
9.08k
  llvm::PointerType *guardPtrTy = guardTy->getPointerTo(
2378
9.08k
      CGF.CGM.getDataLayout().getDefaultGlobalsAddressSpace());
2379
2380
  // Create the guard variable if we don't already have it (as we
2381
  // might if we're double-emitting this function body).
2382
9.08k
  llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
2383
9.08k
  if (!guard) {
2384
    // Mangle the name for the guard.
2385
9.08k
    SmallString<256> guardName;
2386
9.08k
    {
2387
9.08k
      llvm::raw_svector_ostream out(guardName);
2388
9.08k
      getMangleContext().mangleStaticGuardVariable(&D, out);
2389
9.08k
    }
2390
2391
    // Create the guard variable with a zero-initializer.
2392
    // Just absorb linkage and visibility from the guarded variable.
2393
9.08k
    guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
2394
9.08k
                                     false, var->getLinkage(),
2395
9.08k
                                     llvm::ConstantInt::get(guardTy, 0),
2396
9.08k
                                     guardName.str());
2397
9.08k
    guard->setDSOLocal(var->isDSOLocal());
2398
9.08k
    guard->setVisibility(var->getVisibility());
2399
    // If the variable is thread-local, so is its guard variable.
2400
9.08k
    guard->setThreadLocalMode(var->getThreadLocalMode());
2401
9.08k
    guard->setAlignment(guardAlignment.getAsAlign());
2402
2403
    // The ABI says: "It is suggested that it be emitted in the same COMDAT
2404
    // group as the associated data object." In practice, this doesn't work for
2405
    // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
2406
9.08k
    llvm::Comdat *C = var->getComdat();
2407
9.08k
    if (!D.isLocalVarDecl() && C &&
2408
9.08k
        (CGM.getTarget().getTriple().isOSBinFormatELF() ||
2409
140
         CGM.getTarget().getTriple().isOSBinFormatWasm())) {
2410
140
      guard->setComdat(C);
2411
8.94k
    } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
2412
31
      guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
2413
31
    }
2414
2415
9.08k
    CGM.setStaticLocalDeclGuardAddress(&D, guard);
2416
9.08k
  }
2417
2418
9.08k
  Address guardAddr = Address(guard, guard->getValueType(), guardAlignment);
2419
2420
  // Test whether the variable has completed initialization.
2421
  //
2422
  // Itanium C++ ABI 3.3.2:
2423
  //   The following is pseudo-code showing how these functions can be used:
2424
  //     if (obj_guard.first_byte == 0) {
2425
  //       if ( __cxa_guard_acquire (&obj_guard) ) {
2426
  //         try {
2427
  //           ... initialize the object ...;
2428
  //         } catch (...) {
2429
  //            __cxa_guard_abort (&obj_guard);
2430
  //            throw;
2431
  //         }
2432
  //         ... queue object destructor with __cxa_atexit() ...;
2433
  //         __cxa_guard_release (&obj_guard);
2434
  //       }
2435
  //     }
2436
2437
  // Load the first byte of the guard variable.
2438
9.08k
  llvm::LoadInst *LI =
2439
9.08k
      Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
2440
2441
  // Itanium ABI:
2442
  //   An implementation supporting thread-safety on multiprocessor
2443
  //   systems must also guarantee that references to the initialized
2444
  //   object do not occur before the load of the initialization flag.
2445
  //
2446
  // In LLVM, we do this by marking the load Acquire.
2447
9.08k
  if (threadsafe)
2448
171
    LI->setAtomic(llvm::AtomicOrdering::Acquire);
2449
2450
  // For ARM, we should only check the first bit, rather than the entire byte:
2451
  //
2452
  // ARM C++ ABI 3.2.3.1:
2453
  //   To support the potential use of initialization guard variables
2454
  //   as semaphores that are the target of ARM SWP and LDREX/STREX
2455
  //   synchronizing instructions we define a static initialization
2456
  //   guard variable to be a 4-byte aligned, 4-byte word with the
2457
  //   following inline access protocol.
2458
  //     #define INITIALIZED 1
2459
  //     if ((obj_guard & INITIALIZED) != INITIALIZED) {
2460
  //       if (__cxa_guard_acquire(&obj_guard))
2461
  //         ...
2462
  //     }
2463
  //
2464
  // and similarly for ARM64:
2465
  //
2466
  // ARM64 C++ ABI 3.2.2:
2467
  //   This ABI instead only specifies the value bit 0 of the static guard
2468
  //   variable; all other bits are platform defined. Bit 0 shall be 0 when the
2469
  //   variable is not initialized and 1 when it is.
2470
9.08k
  llvm::Value *V =
2471
9.08k
      (UseARMGuardVarABI && !useInt8GuardVariable)
2472
9.08k
          ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
2473
9.08k
          : LI;
2474
9.08k
  llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");
2475
2476
9.08k
  llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
2477
9.08k
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
2478
2479
  // Check if the first byte of the guard variable is zero.
2480
9.08k
  CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
2481
9.08k
                               CodeGenFunction::GuardKind::VariableGuard, &D);
2482
2483
9.08k
  CGF.EmitBlock(InitCheckBlock);
2484
2485
  // Variables used when coping with thread-safe statics and exceptions.
2486
9.08k
  if (threadsafe) {
2487
    // Call __cxa_guard_acquire.
2488
171
    llvm::Value *V
2489
171
      = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);
2490
2491
171
    llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
2492
2493
171
    Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
2494
171
                         InitBlock, EndBlock);
2495
2496
    // Call __cxa_guard_abort along the exceptional edge.
2497
171
    CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);
2498
2499
171
    CGF.EmitBlock(InitBlock);
2500
171
  }
2501
2502
  // Emit the initializer and add a global destructor if appropriate.
2503
9.08k
  CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);
2504
2505
9.08k
  if (threadsafe) {
2506
    // Pop the guard-abort cleanup if we pushed one.
2507
171
    CGF.PopCleanupBlock();
2508
2509
    // Call __cxa_guard_release.  This cannot throw.
2510
171
    CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
2511
171
                                guardAddr.getPointer());
2512
8.91k
  } else {
2513
    // Store 1 into the first byte of the guard variable after initialization is
2514
    // complete.
2515
8.91k
    Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
2516
8.91k
                        Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
2517
8.91k
  }
2518
2519
9.08k
  CGF.EmitBlock(EndBlock);
2520
9.08k
}
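For reference, a minimal translation unit that exercises the guarded-initialization path above is sketched below. The runtime entry points named in the comments are the __cxa_guard_* functions declared earlier in this file; the Logger/getLogger names are placeholders, and the sketch is illustrative rather than a transcript of the generated IR.

// Hedged sketch (assumes the default -fthreadsafe-statics): the local
// static below gets a guard variable; the emitted code loads the guard
// byte with acquire ordering, calls __cxa_guard_acquire on the slow path,
// runs the constructor, queues ~Logger for exit, and finally calls
// __cxa_guard_release (or __cxa_guard_abort if the constructor throws).
struct Logger {
  Logger();     // dynamic initialization, not constant-foldable
  ~Logger();
};

Logger &getLogger() {
  static Logger instance;
  return instance;
}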
2521
2522
/// Register a global destructor using __cxa_atexit.
2523
static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
2524
                                        llvm::FunctionCallee dtor,
2525
4.58k
                                        llvm::Constant *addr, bool TLS) {
2526
4.58k
  assert(!CGF.getTarget().getTriple().isOSAIX() &&
2527
4.58k
         "unexpected call to emitGlobalDtorWithCXAAtExit");
2528
0
  assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
2529
4.58k
         "__cxa_atexit is disabled");
2530
0
  const char *Name = "__cxa_atexit";
2531
4.58k
  if (TLS) {
2532
120
    const llvm::Triple &T = CGF.getTarget().getTriple();
2533
120
    Name = T.isOSDarwin() ? "_tlv_atexit" : "__cxa_thread_atexit";
2534
120
  }
2535
2536
  // We're assuming that the destructor function is something we can
2537
  // reasonably call with the default CC.  Go ahead and cast it to the
2538
  // right prototype.
2539
4.58k
  llvm::Type *dtorTy =
2540
4.58k
    llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();
2541
2542
  // Preserve address space of addr.
2543
4.58k
  auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
2544
4.58k
  auto AddrInt8PtrTy =
2545
4.58k
      AddrAS ? CGF.Int8Ty->getPointerTo(AddrAS) : CGF.Int8PtrTy;
2546
2547
  // Create a variable that binds the atexit to this shared object.
2548
4.58k
  llvm::Constant *handle =
2549
4.58k
      CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
2550
4.58k
  auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
2551
4.58k
  GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
2552
2553
  // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
2554
4.58k
  llvm::Type *paramTys[] = {dtorTy, AddrInt8PtrTy, handle->getType()};
2555
4.58k
  llvm::FunctionType *atexitTy =
2556
4.58k
    llvm::FunctionType::get(CGF.IntTy, paramTys, false);
2557
2558
  // Fetch the actual function.
2559
4.58k
  llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
2560
4.58k
  if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee()))
2561
4.58k
    fn->setDoesNotThrow();
2562
2563
4.58k
  if (!addr)
2564
    // addr is null when we are trying to register a dtor annotated with
2565
    // __attribute__((destructor)) in a constructor function. Using null here is
2566
    // okay because this argument is just passed back to the destructor
2567
    // function.
2568
5
    addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);
2569
2570
4.58k
  llvm::Value *args[] = {llvm::ConstantExpr::getBitCast(
2571
4.58k
                             cast<llvm::Constant>(dtor.getCallee()), dtorTy),
2572
4.58k
                         llvm::ConstantExpr::getBitCast(addr, AddrInt8PtrTy),
2573
4.58k
                         handle};
2574
4.58k
  CGF.EmitNounwindRuntimeCall(atexit, args);
2575
4.58k
}
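At the source level, the call assembled by emitGlobalDtorWithCXAAtExit amounts to the registration sketched below, using the standard Itanium prototypes quoted in the comment inside the function; the Widget/destroy_w names are placeholders, not symbols produced by this file.

// Hedged sketch of what the emitted registration amounts to for a global
// with a destructor.
extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
extern "C" char __dso_handle;

struct Widget { ~Widget(); };
Widget w;   // its dynamic-init code registers the dtor roughly like this:

static void destroy_w(void *p) { static_cast<Widget *>(p)->~Widget(); }

static void register_w_dtor() {
  __cxa_atexit(&destroy_w, &w, &__dso_handle);   // {dtor, addr, handle}
}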
2576
2577
static llvm::Function *createGlobalInitOrCleanupFn(CodeGen::CodeGenModule &CGM,
2578
22
                                                   StringRef FnName) {
2579
  // Create a function that registers/unregisters destructors that have the same
2580
  // priority.
2581
22
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
2582
22
  llvm::Function *GlobalInitOrCleanupFn = CGM.CreateGlobalInitOrCleanUpFunction(
2583
22
      FTy, FnName, CGM.getTypes().arrangeNullaryFunction(), SourceLocation());
2584
2585
22
  return GlobalInitOrCleanupFn;
2586
22
}
2587
2588
117
void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
2589
117
  for (const auto &I : DtorsUsingAtExit) {
2590
8
    int Priority = I.first;
2591
8
    std::string GlobalCleanupFnName =
2592
8
        std::string("__GLOBAL_cleanup_") + llvm::to_string(Priority);
2593
2594
8
    llvm::Function *GlobalCleanupFn =
2595
8
        createGlobalInitOrCleanupFn(*this, GlobalCleanupFnName);
2596
2597
8
    CodeGenFunction CGF(*this);
2598
8
    CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalCleanupFn,
2599
8
                      getTypes().arrangeNullaryFunction(), FunctionArgList(),
2600
8
                      SourceLocation(), SourceLocation());
2601
8
    auto AL = ApplyDebugLocation::CreateArtificial(CGF);
2602
2603
    // Get the destructor function type, void(*)(void).
2604
8
    llvm::FunctionType *dtorFuncTy = llvm::FunctionType::get(CGF.VoidTy, false);
2605
8
    llvm::Type *dtorTy = dtorFuncTy->getPointerTo();
2606
2607
    // Destructor functions are run/unregistered in non-ascending
2608
    // order of their priorities.
2609
8
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
2610
8
    auto itv = Dtors.rbegin();
2611
20
    while (itv != Dtors.rend()) {
2612
12
      llvm::Function *Dtor = *itv;
2613
2614
      // We're assuming that the destructor function is something we can
2615
      // reasonably call with the correct CC.  Go ahead and cast it to the
2616
      // right prototype.
2617
12
      llvm::Constant *dtor = llvm::ConstantExpr::getBitCast(Dtor, dtorTy);
2618
12
      llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtor);
2619
12
      llvm::Value *NeedsDestruct =
2620
12
          CGF.Builder.CreateIsNull(V, "needs_destruct");
2621
2622
12
      llvm::BasicBlock *DestructCallBlock =
2623
12
          CGF.createBasicBlock("destruct.call");
2624
12
      llvm::BasicBlock *EndBlock = CGF.createBasicBlock(
2625
12
          (itv + 1) != Dtors.rend() ? "unatexit.call" : "destruct.end");
2626
      // Check if unatexit returns a value of 0. If it does, jump to
2627
      // DestructCallBlock, otherwise jump to EndBlock directly.
2628
12
      CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);
2629
2630
12
      CGF.EmitBlock(DestructCallBlock);
2631
2632
      // Emit the call to casted Dtor.
2633
12
      llvm::CallInst *CI = CGF.Builder.CreateCall(dtorFuncTy, dtor);
2634
      // Make sure the call and the callee agree on calling convention.
2635
12
      CI->setCallingConv(Dtor->getCallingConv());
2636
2637
12
      CGF.EmitBlock(EndBlock);
2638
2639
12
      itv++;
2640
12
    }
2641
2642
8
    CGF.FinishFunction();
2643
8
    AddGlobalDtor(GlobalCleanupFn, Priority);
2644
8
  }
2645
117
}
2646
2647
35.9k
void CodeGenModule::registerGlobalDtorsWithAtExit() {
2648
35.9k
  for (const auto &I : DtorsUsingAtExit) {
2649
14
    int Priority = I.first;
2650
14
    std::string GlobalInitFnName =
2651
14
        std::string("__GLOBAL_init_") + llvm::to_string(Priority);
2652
14
    llvm::Function *GlobalInitFn =
2653
14
        createGlobalInitOrCleanupFn(*this, GlobalInitFnName);
2654
2655
14
    CodeGenFunction CGF(*this);
2656
14
    CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalInitFn,
2657
14
                      getTypes().arrangeNullaryFunction(), FunctionArgList(),
2658
14
                      SourceLocation(), SourceLocation());
2659
14
    auto AL = ApplyDebugLocation::CreateArtificial(CGF);
2660
2661
    // Since constructor functions are run in non-descending order of their
2662
    // priorities, destructors are registered in non-descending order of their
2663
    // priorities, and since destructor functions are run in the reverse order
2664
    // of their registration, destructor functions are run in non-ascending
2665
    // order of their priorities.
2666
14
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
2667
22
    for (auto *Dtor : Dtors) {
2668
      // Register the destructor function calling __cxa_atexit if it is
2669
      // available. Otherwise fall back on calling atexit.
2670
22
      if (getCodeGenOpts().CXAAtExit) {
2671
5
        emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false);
2672
17
      } else {
2673
        // Get the destructor function type, void(*)(void).
2674
17
        llvm::Type *dtorTy =
2675
17
            llvm::FunctionType::get(CGF.VoidTy, false)->getPointerTo();
2676
2677
        // We're assuming that the destructor function is something we can
2678
        // reasonably call with the correct CC.  Go ahead and cast it to the
2679
        // right prototype.
2680
17
        CGF.registerGlobalDtorWithAtExit(
2681
17
            llvm::ConstantExpr::getBitCast(Dtor, dtorTy));
2682
17
      }
2683
22
    }
2684
2685
14
    CGF.FinishFunction();
2686
14
    AddGlobalCtor(GlobalInitFn, Priority, nullptr);
2687
14
  }
2688
2689
35.9k
  if (getCXXABI().useSinitAndSterm())
2690
117
    unregisterGlobalDtorsWithUnAtExit();
2691
35.9k
}
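The DtorsUsingAtExit map consulted above is typically populated when global destructors are registered through atexit-style calls (for example under -fregister-global-dtors-with-atexit, or on targets using sinit/sterm); a source-level example that yields one per-priority helper is sketched below, with the __GLOBAL_init_101 / __GLOBAL_cleanup_101 names following the string concatenation in the two functions above. The function names are placeholders.

// Hedged example: both functions share priority 101, so a single
// generated __GLOBAL_init_101 registers them (via __cxa_atexit or
// atexit), and on sinit/sterm targets __GLOBAL_cleanup_101 unregisters
// them with unatexit.
__attribute__((destructor(101))) void flushCaches() {}
__attribute__((destructor(101))) void closeLogs() {}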
2692
2693
/// Register a global destructor as best as we know how.
2694
void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
2695
                                       llvm::FunctionCallee dtor,
2696
4.60k
                                       llvm::Constant *addr) {
2697
4.60k
  if (D.isNoDestroy(CGM.getContext()))
2698
0
    return;
2699
2700
  // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
2701
  // or __cxa_atexit depending on whether this VarDecl is a thread-local storage
2702
  // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
2703
  // We can always use __cxa_thread_atexit.
2704
4.60k
  if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind())
2705
4.57k
    return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
2706
2707
  // In Apple kexts, we want to add a global destructor entry.
2708
  // FIXME: shouldn't this be guarded by some variable?
2709
24
  if (CGM.getLangOpts().AppleKext) {
2710
    // Generate a global destructor entry.
2711
6
    return CGM.AddCXXDtorEntry(dtor, addr);
2712
6
  }
2713
2714
18
  CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
2715
18
}
2716
2717
static bool isThreadWrapperReplaceable(const VarDecl *VD,
2718
930
                                       CodeGen::CodeGenModule &CGM) {
2719
930
  assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
2720
  // Darwin prefers to have references to thread local variables to go through
2721
  // the thread wrapper instead of directly referencing the backing variable.
2722
930
  return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2723
930
         CGM.getTarget().getTriple().isOSDarwin();
2724
930
}
2725
2726
/// Get the appropriate linkage for the wrapper function. This is essentially
2727
/// the weak form of the variable's linkage; every translation unit which needs
2728
/// the wrapper emits a copy, and we want the linker to merge them.
2729
static llvm::GlobalValue::LinkageTypes
2730
256
getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
2731
256
  llvm::GlobalValue::LinkageTypes VarLinkage =
2732
256
      CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
2733
2734
  // For internal linkage variables, we don't need an external or weak wrapper.
2735
256
  if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
2736
15
    return VarLinkage;
2737
2738
  // If the thread wrapper is replaceable, give it appropriate linkage.
2739
241
  if (isThreadWrapperReplaceable(VD, CGM))
2740
60
    if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
2741
60
        !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
2742
46
      return VarLinkage;
2743
195
  return llvm::GlobalValue::WeakODRLinkage;
2744
241
}
2745
2746
llvm::Function *
2747
ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
2748
301
                                             llvm::Value *Val) {
2749
  // Mangle the name for the thread_local wrapper function.
2750
301
  SmallString<256> WrapperName;
2751
301
  {
2752
301
    llvm::raw_svector_ostream Out(WrapperName);
2753
301
    getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
2754
301
  }
2755
2756
  // FIXME: If VD is a definition, we should regenerate the function attributes
2757
  // before returning.
2758
301
  if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
2759
45
    return cast<llvm::Function>(V);
2760
2761
256
  QualType RetQT = VD->getType();
2762
256
  if (RetQT->isReferenceType())
2763
3
    RetQT = RetQT.getNonReferenceType();
2764
2765
256
  const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2766
256
      getContext().getPointerType(RetQT), FunctionArgList());
2767
2768
256
  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
2769
256
  llvm::Function *Wrapper =
2770
256
      llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
2771
256
                             WrapperName.str(), &CGM.getModule());
2772
2773
256
  if (CGM.supportsCOMDAT() && Wrapper->isWeakForLinker())
2774
165
    Wrapper->setComdat(CGM.getModule().getOrInsertComdat(Wrapper->getName()));
2775
2776
256
  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper, /*IsThunk=*/false);
2777
2778
  // Always resolve references to the wrapper at link time.
2779
256
  if (!Wrapper->hasLocalLinkage())
2780
241
    if (!isThreadWrapperReplaceable(VD, CGM) ||
2781
241
        llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) ||
2782
241
        llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage()) ||
2783
241
        VD->getVisibility() == HiddenVisibility)
2784
196
      Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);
2785
2786
256
  if (isThreadWrapperReplaceable(VD, CGM)) {
2787
62
    Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2788
62
    Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
2789
62
  }
2790
2791
256
  ThreadWrappers.push_back({VD, Wrapper});
2792
256
  return Wrapper;
2793
301
}
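For context, the wrapper created here is what an odr-use of an extern thread_local lowers to. Under the usual Itanium mangling the wrapper for a namespace-scope variable t is _ZTW1t and its init function is _ZTH1t; those names and the variable below are illustrative assumptions, not taken from this report.

// Hedged sketch of the consumer side of getOrCreateThreadLocalWrapper.
extern thread_local int t;   // defined (and possibly dynamically
                             // initialized) in some other TU

int readT() {
  return t;                  // emitted as a call to the _ZTW1t wrapper,
                             // which invokes _ZTH1t first if it exists
}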
2794
2795
void ItaniumCXXABI::EmitThreadLocalInitFuncs(
2796
    CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
2797
    ArrayRef<llvm::Function *> CXXThreadLocalInits,
2798
35.1k
    ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
2799
35.1k
  llvm::Function *InitFunc = nullptr;
2800
2801
  // Separate initializers into those with ordered (or partially-ordered)
2802
  // initialization and those with unordered initialization.
2803
35.1k
  llvm::SmallVector<llvm::Function *, 8> OrderedInits;
2804
35.1k
  llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
2805
35.2k
  for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
2806
177
    if (isTemplateInstantiation(
2807
177
            CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
2808
80
      UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
2809
80
          CXXThreadLocalInits[I];
2810
97
    else
2811
97
      OrderedInits.push_back(CXXThreadLocalInits[I]);
2812
177
  }
2813
2814
35.1k
  if (!OrderedInits.empty()) {
2815
    // Generate a guarded initialization function.
2816
51
    llvm::FunctionType *FTy =
2817
51
        llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
2818
51
    const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2819
51
    InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(FTy, "__tls_init", FI,
2820
51
                                                     SourceLocation(),
2821
51
                                                     /*TLS=*/true);
2822
51
    llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
2823
51
        CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
2824
51
        llvm::GlobalVariable::InternalLinkage,
2825
51
        llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
2826
51
    Guard->setThreadLocal(true);
2827
51
    Guard->setThreadLocalMode(CGM.GetDefaultLLVMTLSModel());
2828
2829
51
    CharUnits GuardAlign = CharUnits::One();
2830
51
    Guard->setAlignment(GuardAlign.getAsAlign());
2831
2832
51
    CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
2833
51
        InitFunc, OrderedInits, ConstantAddress(Guard, CGM.Int8Ty, GuardAlign));
2834
    // On Darwin platforms, use CXX_FAST_TLS calling convention.
2835
51
    if (CGM.getTarget().getTriple().isOSDarwin()) {
2836
12
      InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2837
12
      InitFunc->addFnAttr(llvm::Attribute::NoUnwind);
2838
12
    }
2839
51
  }
2840
2841
  // Create declarations for thread wrappers for all thread-local variables
2842
  // with non-discardable definitions in this translation unit.
2843
35.1k
  for (const VarDecl *VD : CXXThreadLocals) {
2844
330
    if (VD->hasDefinition() &&
2845
330
        !isDiscardableGVALinkage(getContext().GetGVALinkageForVariable(VD))) {
2846
137
      llvm::GlobalValue *GV = CGM.GetGlobalValue(CGM.getMangledName(VD));
2847
137
      getOrCreateThreadLocalWrapper(VD, GV);
2848
137
    }
2849
330
  }
2850
2851
  // Emit all referenced thread wrappers.
2852
35.1k
  for (auto VDAndWrapper : ThreadWrappers) {
2853
256
    const VarDecl *VD = VDAndWrapper.first;
2854
256
    llvm::GlobalVariable *Var =
2855
256
        cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
2856
256
    llvm::Function *Wrapper = VDAndWrapper.second;
2857
2858
    // Some targets require that all access to thread local variables go through
2859
    // the thread wrapper.  This means that we cannot attempt to create a thread
2860
    // wrapper or a thread helper.
2861
256
    if (!VD->hasDefinition()) {
2862
64
      if (isThreadWrapperReplaceable(VD, CGM)) {
2863
14
        Wrapper->setLinkage(llvm::Function::ExternalLinkage);
2864
14
        continue;
2865
14
      }
2866
2867
      // If this isn't a TU in which this variable is defined, the thread
2868
      // wrapper is discardable.
2869
50
      if (Wrapper->getLinkage() == llvm::Function::WeakODRLinkage)
2870
50
        Wrapper->setLinkage(llvm::Function::LinkOnceODRLinkage);
2871
50
    }
2872
2873
242
    CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);
2874
2875
    // Mangle the name for the thread_local initialization function.
2876
242
    SmallString<256> InitFnName;
2877
242
    {
2878
242
      llvm::raw_svector_ostream Out(InitFnName);
2879
242
      getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
2880
242
    }
2881
2882
242
    llvm::FunctionType *InitFnTy = llvm::FunctionType::get(CGM.VoidTy, false);
2883
2884
    // If we have a definition for the variable, emit the initialization
2885
    // function as an alias to the global Init function (if any). Otherwise,
2886
    // produce a declaration of the initialization function.
2887
242
    llvm::GlobalValue *Init = nullptr;
2888
242
    bool InitIsInitFunc = false;
2889
242
    bool HasConstantInitialization = false;
2890
242
    if (!usesThreadWrapperFunction(VD)) {
2891
62
      HasConstantInitialization = true;
2892
180
    } else if (VD->hasDefinition()) {
2893
130
      InitIsInitFunc = true;
2894
130
      llvm::Function *InitFuncToUse = InitFunc;
2895
130
      if (isTemplateInstantiation(VD->getTemplateSpecializationKind()))
2896
44
        InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
2897
130
      if (InitFuncToUse)
2898
128
        Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
2899
128
                                         InitFuncToUse);
2900
130
    } else {
2901
      // Emit a weak global function referring to the initialization function.
2902
      // This function will not exist if the TU defining the thread_local
2903
      // variable in question does not need any dynamic initialization for
2904
      // its thread_local variables.
2905
50
      Init = llvm::Function::Create(InitFnTy,
2906
50
                                    llvm::GlobalVariable::ExternalWeakLinkage,
2907
50
                                    InitFnName.str(), &CGM.getModule());
2908
50
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2909
50
      CGM.SetLLVMFunctionAttributes(
2910
50
          GlobalDecl(), FI, cast<llvm::Function>(Init), /*IsThunk=*/false);
2911
50
    }
2912
2913
242
    if (Init) {
2914
178
      Init->setVisibility(Var->getVisibility());
2915
      // Don't mark an extern_weak function DSO local on windows.
2916
178
      if (!CGM.getTriple().isOSWindows() || !Init->hasExternalWeakLinkage())
2917
176
        Init->setDSOLocal(Var->isDSOLocal());
2918
178
    }
2919
2920
242
    llvm::LLVMContext &Context = CGM.getModule().getContext();
2921
2922
    // The linker on AIX is not happy with missing weak symbols.  However,
2923
    // other TUs will not know whether the initialization routine exists
2924
    // so create an empty init function to satisfy the linker.
2925
    // This is needed whenever a thread wrapper function is not used, and
2926
    // also when the symbol is weak.
2927
242
    if (CGM.getTriple().isOSAIX() && VD->hasDefinition() &&
2928
242
        isEmittedWithConstantInitializer(VD, true) &&
2929
242
        !mayNeedDestruction(VD)) {
2930
      // Init should be null.  If it were non-null, then the logic above would
2931
      // either be defining the function to be an alias or declaring the
2932
      // function with the expectation that the definition of the variable
2933
      // is elsewhere.
2934
4
      assert(Init == nullptr && "Expected Init to be null.");
2935
2936
0
      llvm::Function *Func = llvm::Function::Create(
2937
4
          InitFnTy, Var->getLinkage(), InitFnName.str(), &CGM.getModule());
2938
4
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2939
4
      CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI,
2940
4
                                    cast<llvm::Function>(Func),
2941
4
                                    /*IsThunk=*/false);
2942
      // Create a function body that just returns
2943
4
      llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Func);
2944
4
      CGBuilderTy Builder(CGM, Entry);
2945
4
      Builder.CreateRetVoid();
2946
4
    }
2947
2948
0
    llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
2949
242
    CGBuilderTy Builder(CGM, Entry);
2950
242
    if (HasConstantInitialization) {
2951
      // No dynamic initialization to invoke.
2952
180
    } else if (InitIsInitFunc) {
2953
130
      if (Init) {
2954
128
        llvm::CallInst *CallVal = Builder.CreateCall(InitFnTy, Init);
2955
128
        if (isThreadWrapperReplaceable(VD, CGM)) {
2956
27
          CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2957
27
          llvm::Function *Fn =
2958
27
              cast<llvm::Function>(cast<llvm::GlobalAlias>(Init)->getAliasee());
2959
27
          Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2960
27
        }
2961
128
      }
2962
130
    } else if (CGM.getTriple().isOSAIX()) {
2963
      // On AIX, except if constinit and also neither of class type or of
2964
      // (possibly multi-dimensional) array of class type, thread_local vars
2965
      // will have init routines regardless of whether they are
2966
      // const-initialized.  Since the routine is guaranteed to exist, we can
2967
      // unconditionally call it without testing for its existence.  This
2968
      // avoids potentially unresolved weak symbols which the AIX linker
2969
      // isn't happy with.
2970
5
      Builder.CreateCall(InitFnTy, Init);
2971
45
    } else {
2972
      // Don't know whether we have an init function. Call it if it exists.
2973
45
      llvm::Value *Have = Builder.CreateIsNotNull(Init);
2974
45
      llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
2975
45
      llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
2976
45
      Builder.CreateCondBr(Have, InitBB, ExitBB);
2977
2978
45
      Builder.SetInsertPoint(InitBB);
2979
45
      Builder.CreateCall(InitFnTy, Init);
2980
45
      Builder.CreateBr(ExitBB);
2981
2982
45
      Builder.SetInsertPoint(ExitBB);
2983
45
    }
2984
2985
    // For a reference, the result of the wrapper function is a pointer to
2986
    // the referenced object.
2987
242
    llvm::Value *Val = Var;
2988
242
    if (VD->getType()->isReferenceType()) {
2989
3
      CharUnits Align = CGM.getContext().getDeclAlign(VD);
2990
3
      Val = Builder.CreateAlignedLoad(Var->getValueType(), Var, Align);
2991
3
    }
2992
242
    if (Val->getType() != Wrapper->getReturnType())
2993
0
      Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
2994
0
          Val, Wrapper->getReturnType(), "");
2995
242
    Builder.CreateRet(Val);
2996
242
  }
2997
35.1k
}
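On the defining side, a thread_local with a dynamic initializer is what feeds the OrderedInits path above: its initializer is collected into the generated __tls_init function, gated by the internal __tls_guard byte created above, and for ordered initializers the per-variable _ZTH symbol is emitted as an alias of __tls_init. The variable and function names in the sketch below are placeholders.

// Hedged example of an ordered dynamic TLS initialization.
int computeSeed();                        // defined elsewhere
thread_local int seed = computeSeed();    // initializer lands in
                                          // __tls_init, guarded by
                                          // __tls_guard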
2998
2999
LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
3000
                                                   const VarDecl *VD,
3001
164
                                                   QualType LValType) {
3002
164
  llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
3003
164
  llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
3004
3005
164
  llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
3006
164
  CallVal->setCallingConv(Wrapper->getCallingConv());
3007
3008
164
  LValue LV;
3009
164
  if (VD->getType()->isReferenceType())
3010
3
    LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
3011
161
  else
3012
161
    LV = CGF.MakeAddrLValue(CallVal, LValType,
3013
161
                            CGF.getContext().getDeclAlign(VD));
3014
  // FIXME: need setObjCGCLValueClass?
3015
164
  return LV;
3016
164
}
3017
3018
/// Return whether the given global decl needs a VTT parameter, which it does
3019
/// if it's a base constructor or destructor with virtual bases.
3020
155k
bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
3021
155k
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
3022
3023
  // We don't have any virtual bases, just return early.
3024
155k
  if (!MD->getParent()->getNumVBases())
3025
152k
    return false;
3026
3027
  // Check if we have a base constructor.
3028
3.13k
  if (isa<CXXConstructorDecl>(MD) && 
GD.getCtorType() == Ctor_Base2.11k
)
3029
1.02k
    return true;
3030
3031
  // Check if we have a base destructor.
3032
2.11k
  if (isa<CXXDestructorDecl>(MD) && 
GD.getDtorType() == Dtor_Base1.02k
)
3033
423
    return true;
3034
3035
1.68k
  return false;
3036
2.11k
}
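A small hierarchy that makes NeedsVTTParameter return true is sketched below; the class names are placeholders.

// Hedged example: B has a virtual base, so B's base-object constructor
// and destructor take a VTT parameter; A's do not, and neither do the
// complete-object variants of B's.
struct A { int a; };
struct B : virtual A { B(); ~B(); };
struct C : B { C(); };   // constructing the B subobject of C passes a
                         // VTT fragment to B's base-object constructor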
3037
3038
namespace {
3039
class ItaniumRTTIBuilder {
3040
  CodeGenModule &CGM;  // Per-module state.
3041
  llvm::LLVMContext &VMContext;
3042
  const ItaniumCXXABI &CXXABI;  // Per-module state.
3043
3044
  /// Fields - The fields of the RTTI descriptor currently being built.
3045
  SmallVector<llvm::Constant *, 16> Fields;
3046
3047
  /// GetAddrOfTypeName - Returns the mangled type name of the given type.
3048
  llvm::GlobalVariable *
3049
  GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);
3050
3051
  /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
3052
  /// descriptor of the given type.
3053
  llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
3054
3055
  /// BuildVTablePointer - Build the vtable pointer for the given type.
3056
  void BuildVTablePointer(const Type *Ty);
3057
3058
  /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3059
  /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
3060
  void BuildSIClassTypeInfo(const CXXRecordDecl *RD);
3061
3062
  /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
3063
  /// classes with bases that do not satisfy the abi::__si_class_type_info
3064
  /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
3065
  void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);
3066
3067
  /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
3068
  /// for pointer types.
3069
  void BuildPointerTypeInfo(QualType PointeeTy);
3070
3071
  /// BuildObjCObjectTypeInfo - Build the appropriate kind of
3072
  /// type_info for an object type.
3073
  void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);
3074
3075
  /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
3076
  /// struct, used for member pointer types.
3077
  void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);
3078
3079
public:
3080
  ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
3081
6.02k
      : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}
3082
3083
  // Pointer type info flags.
3084
  enum {
3085
    /// PTI_Const - Type has const qualifier.
3086
    PTI_Const = 0x1,
3087
3088
    /// PTI_Volatile - Type has volatile qualifier.
3089
    PTI_Volatile = 0x2,
3090
3091
    /// PTI_Restrict - Type has restrict qualifier.
3092
    PTI_Restrict = 0x4,
3093
3094
    /// PTI_Incomplete - Type is incomplete.
3095
    PTI_Incomplete = 0x8,
3096
3097
    /// PTI_ContainingClassIncomplete - Containing class is incomplete.
3098
    /// (in pointer to member).
3099
    PTI_ContainingClassIncomplete = 0x10,
3100
3101
    /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
3102
    //PTI_TransactionSafe = 0x20,
3103
3104
    /// PTI_Noexcept - Pointee is noexcept function (C++1z).
3105
    PTI_Noexcept = 0x40,
3106
  };
3107
3108
  // VMI type info flags.
3109
  enum {
3110
    /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
3111
    VMI_NonDiamondRepeat = 0x1,
3112
3113
    /// VMI_DiamondShaped - Class is diamond shaped.
3114
    VMI_DiamondShaped = 0x2
3115
  };
3116
3117
  // Base class type info flags.
3118
  enum {
3119
    /// BCTI_Virtual - Base class is virtual.
3120
    BCTI_Virtual = 0x1,
3121
3122
    /// BCTI_Public - Base class is public.
3123
    BCTI_Public = 0x2
3124
  };
3125
3126
  /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
3127
  /// link to an existing RTTI descriptor if one already exists.
3128
  llvm::Constant *BuildTypeInfo(QualType Ty);
3129
3130
  /// BuildTypeInfo - Build the RTTI type info struct for the given type.
3131
  llvm::Constant *BuildTypeInfo(
3132
      QualType Ty,
3133
      llvm::GlobalVariable::LinkageTypes Linkage,
3134
      llvm::GlobalValue::VisibilityTypes Visibility,
3135
      llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass);
3136
};
3137
}
3138
3139
llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
3140
3.04k
    QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
3141
3.04k
  SmallString<256> Name;
3142
3.04k
  llvm::raw_svector_ostream Out(Name);
3143
3.04k
  CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
3144
3145
  // We know that the mangled name of the type starts at index 4 of the
3146
  // mangled name of the typename, so we can just index into it in order to
3147
  // get the mangled name of the type.
3148
3.04k
  llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
3149
3.04k
                                                            Name.substr(4));
3150
3.04k
  auto Align = CGM.getContext().getTypeAlignInChars(CGM.getContext().CharTy);
3151
3152
3.04k
  llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
3153
3.04k
      Name, Init->getType(), Linkage, Align.getQuantity());
3154
3155
3.04k
  GV->setInitializer(Init);
3156
3157
3.04k
  return GV;
3158
3.04k
}
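The substr(4) above relies on the type-name symbol being the four-character prefix _ZTS followed by the mangled type. For example (assumed, following the usual Itanium scheme), for the class below the symbol is _ZTS1A and the stored string is "1A", while for int it would be _ZTSi and "i".

// Hedged illustration of the stored name string.
#include <typeinfo>
struct A { virtual ~A(); };       // type-name global _ZTS1A, contents "1A"
const char *nameOfA() {
  return typeid(A).name();        // typically returns that stored "1A"
}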
3159
3160
llvm::Constant *
3161
1.32k
ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
3162
  // Mangle the RTTI name.
3163
1.32k
  SmallString<256> Name;
3164
1.32k
  llvm::raw_svector_ostream Out(Name);
3165
1.32k
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3166
3167
  // Look for an existing global.
3168
1.32k
  llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
3169
3170
1.32k
  if (!GV) {
3171
    // Create a new global variable.
3172
    // Note for the future: If we would ever like to do deferred emission of
3173
    // RTTI, check if emitting vtables opportunistically needs any adjustment.
3174
3175
1.05k
    GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
3176
1.05k
                                  /*isConstant=*/true,
3177
1.05k
                                  llvm::GlobalValue::ExternalLinkage, nullptr,
3178
1.05k
                                  Name);
3179
1.05k
    const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
3180
1.05k
    CGM.setGVProperties(GV, RD);
3181
    // Import the typeinfo symbol when all non-inline virtual methods are
3182
    // imported.
3183
1.05k
    if (CGM.getTarget().hasPS4DLLImportExport()) {
3184
15
      if (RD && CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD)) {
3185
6
        GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
3186
6
        CGM.setDSOLocal(GV);
3187
6
      }
3188
15
    }
3189
1.05k
  }
3190
3191
1.32k
  return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
3192
1.32k
}
3193
3194
/// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
3195
/// info for that type is defined in the standard library.
3196
225
static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
3197
  // Itanium C++ ABI 2.9.2:
3198
  //   Basic type information (e.g. for "int", "bool", etc.) will be kept in
3199
  //   the run-time support library. Specifically, the run-time support
3200
  //   library should contain type_info objects for the types X, X* and
3201
  //   X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
3202
  //   unsigned char, signed char, short, unsigned short, int, unsigned int,
3203
  //   long, unsigned long, long long, unsigned long long, float, double,
3204
  //   long double, char16_t, char32_t, and the IEEE 754r decimal and
3205
  //   half-precision floating point types.
3206
  //
3207
  // GCC also emits RTTI for __int128.
3208
  // FIXME: We do not emit RTTI information for decimal types here.
3209
3210
  // Types added here must also be added to EmitFundamentalRTTIDescriptors.
3211
225
  switch (Ty->getKind()) {
3212
2
    case BuiltinType::Void:
3213
3
    case BuiltinType::NullPtr:
3214
4
    case BuiltinType::Bool:
3215
4
    case BuiltinType::WChar_S:
3216
4
    case BuiltinType::WChar_U:
3217
4
    case BuiltinType::Char_U:
3218
10
    case BuiltinType::Char_S:
3219
10
    case BuiltinType::UChar:
3220
10
    case BuiltinType::SChar:
3221
10
    case BuiltinType::Short:
3222
10
    case BuiltinType::UShort:
3223
197
    case BuiltinType::Int:
3224
197
    case BuiltinType::UInt:
3225
198
    case BuiltinType::Long:
3226
198
    case BuiltinType::ULong:
3227
198
    case BuiltinType::LongLong:
3228
198
    case BuiltinType::ULongLong:
3229
198
    case BuiltinType::Half:
3230
198
    case BuiltinType::Float:
3231
199
    case BuiltinType::Double:
3232
199
    case BuiltinType::LongDouble:
3233
199
    case BuiltinType::Float16:
3234
199
    case BuiltinType::Float128:
3235
199
    case BuiltinType::Ibm128:
3236
199
    case BuiltinType::Char8:
3237
199
    case BuiltinType::Char16:
3238
199
    case BuiltinType::Char32:
3239
199
    case BuiltinType::Int128:
3240
199
    case BuiltinType::UInt128:
3241
199
      return true;
3242
3243
0
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
3244
0
    case BuiltinType::Id:
3245
199
#include "clang/Basic/OpenCLImageTypes.def"
3246
0
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
3247
0
    case BuiltinType::Id:
3248
0
#include "clang/Basic/OpenCLExtensionTypes.def"
3249
0
    case BuiltinType::OCLSampler:
3250
0
    case BuiltinType::OCLEvent:
3251
0
    case BuiltinType::OCLClkEvent:
3252
0
    case BuiltinType::OCLQueue:
3253
0
    case BuiltinType::OCLReserveID:
3254
0
#define SVE_TYPE(Name, Id, SingletonId) \
3255
1.04k
    case BuiltinType::Id:
3256
1.04k
#include "clang/Basic/AArch64SVEACLETypes.def"
3257
1.04k
#define PPC_VECTOR_TYPE(Name, Id, Size) \
3258
1.04k
    case BuiltinType::Id:
3259
1.04k
#include "clang/Basic/PPCTypes.def"
3260
1.71k
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3261
1.71k
#include "clang/Basic/RISCVVTypes.def"
3262
1.71k
    case BuiltinType::ShortAccum:
3263
26
    case BuiltinType::Accum:
3264
26
    case BuiltinType::LongAccum:
3265
26
    case BuiltinType::UShortAccum:
3266
26
    case BuiltinType::UAccum:
3267
26
    case BuiltinType::ULongAccum:
3268
26
    case BuiltinType::ShortFract:
3269
26
    case BuiltinType::Fract:
3270
26
    case BuiltinType::LongFract:
3271
26
    case BuiltinType::UShortFract:
3272
26
    case BuiltinType::UFract:
3273
26
    case BuiltinType::ULongFract:
3274
26
    case BuiltinType::SatShortAccum:
3275
26
    case BuiltinType::SatAccum:
3276
26
    case BuiltinType::SatLongAccum:
3277
26
    case BuiltinType::SatUShortAccum:
3278
26
    case BuiltinType::SatUAccum:
3279
26
    case BuiltinType::SatULongAccum:
3280
26
    case BuiltinType::SatShortFract:
3281
26
    case BuiltinType::SatFract:
3282
26
    case BuiltinType::SatLongFract:
3283
26
    case BuiltinType::SatUShortFract:
3284
26
    case BuiltinType::SatUFract:
3285
26
    case BuiltinType::SatULongFract:
3286
26
    case BuiltinType::BFloat16:
3287
26
      return false;
3288
3289
0
    case BuiltinType::Dependent:
3290
0
#define BUILTIN_TYPE(Id, SingletonId)
3291
0
#define PLACEHOLDER_TYPE(Id, SingletonId) \
3292
0
    case BuiltinType::Id:
3293
0
#include "clang/AST/BuiltinTypes.def"
3294
0
      llvm_unreachable("asking for RRTI for a placeholder type!");
3295
3296
0
    case BuiltinType::ObjCId:
3297
0
    case BuiltinType::ObjCClass:
3298
0
    case BuiltinType::ObjCSel:
3299
0
      llvm_unreachable("FIXME: Objective-C types are unsupported!");
3300
225
  }
3301
3302
0
  llvm_unreachable("Invalid BuiltinType Kind!");
3303
0
}
3304
3305
86
static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
3306
86
  QualType PointeeTy = PointerTy->getPointeeType();
3307
86
  const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
3308
86
  if (!BuiltinTy)
3309
72
    return false;
3310
3311
  // Check the qualifiers.
3312
14
  Qualifiers Quals = PointeeTy.getQualifiers();
3313
14
  Quals.removeConst();
3314
3315
14
  if (!Quals.empty())
3316
0
    return false;
3317
3318
14
  return TypeInfoIsInStandardLibrary(BuiltinTy);
3319
14
}
3320
3321
/// IsStandardLibraryRTTIDescriptor - Returns whether the type
3322
/// information for the given type exists in the standard library.
3323
3.61k
static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
3324
  // Type info for builtin types is defined in the standard library.
3325
3.61k
  if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
3326
211
    return TypeInfoIsInStandardLibrary(BuiltinTy);
3327
3328
  // Type info for some pointer types to builtin types is defined in the
3329
  // standard library.
3330
3.40k
  if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3331
86
    return TypeInfoIsInStandardLibrary(PointerTy);
3332
3333
3.32k
  return false;
3334
3.40k
}
3335
3336
/// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
3337
/// the given type exists somewhere else, and that we should not emit the type
3338
/// information in this translation unit.  Assumes that it is not a
3339
/// standard-library type.
3340
static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
3341
3.42k
                                            QualType Ty) {
3342
3.42k
  ASTContext &Context = CGM.getContext();
3343
3344
  // If RTTI is disabled, assume it might be disabled in the
3345
  // translation unit that defines any potential key function, too.
3346
3.42k
  if (!Context.getLangOpts().RTTI) return false;
3347
3348
3.41k
  if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3349
3.10k
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
3350
3.10k
    if (!RD->hasDefinition())
3351
15
      return false;
3352
3353
3.09k
    if (!RD->isDynamicClass())
3354
277
      return false;
3355
3356
    // FIXME: this may need to be reconsidered if the key function
3357
    // changes.
3358
    // N.B. We must always emit the RTTI data ourselves if there exists a key
3359
    // function.
3360
2.81k
    bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
3361
3362
    // Don't import the RTTI but emit it locally.
3363
2.81k
    if (CGM.getTriple().isWindowsGNUEnvironment())
3364
89
      return false;
3365
3366
2.72k
    if (CGM.getVTables().isVTableExternal(RD)) {
3367
1.12k
      if (CGM.getTarget().hasPS4DLLImportExport())
3368
15
        return true;
3369
3370
1.10k
    return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
3371
1.10k
                 ? false
3372
1.10k
                 : true;
3373
1.12k
    }
3374
1.60k
    if (IsDLLImport)
3375
0
      return true;
3376
1.60k
  }
3377
3378
1.90k
  return false;
3379
3.41k
}
3380
3381
/// IsIncompleteClassType - Returns whether the given record type is incomplete.
3382
2.15k
static bool IsIncompleteClassType(const RecordType *RecordTy) {
3383
2.15k
  return !RecordTy->getDecl()->isCompleteDefinition();
3384
2.15k
}
3385
3386
/// ContainsIncompleteClassType - Returns whether the given type contains an
3387
/// incomplete class type. This is true if
3388
///
3389
///   * The given type is an incomplete class type.
3390
///   * The given type is a pointer type whose pointee type contains an
3391
///     incomplete class type.
3392
///   * The given type is a member pointer type whose class is an incomplete
3393
///     class type.
3394
///   * The given type is a member pointer type whose pointee type contains an
3395
///     incomplete class type.
3396
/// is an indirect or direct pointer to an incomplete class type.
3397
3.02k
static bool ContainsIncompleteClassType(QualType Ty) {
3398
3.02k
  if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3399
2.10k
    if (IsIncompleteClassType(RecordTy))
3400
92
      return true;
3401
2.10k
  }
3402
3403
2.93k
  if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3404
116
    return ContainsIncompleteClassType(PointerTy->getPointeeType());
3405
3406
2.81k
  if (const MemberPointerType *MemberPointerTy =
3407
2.81k
      dyn_cast<MemberPointerType>(Ty)) {
3408
    // Check if the class type is incomplete.
3409
25
    const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
3410
25
    if (IsIncompleteClassType(ClassType))
3411
12
      return true;
3412
3413
13
    return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
3414
25
  }
3415
3416
2.79k
  return false;
3417
2.81k
}
3418
3419
// CanUseSingleInheritance - Return whether the given record decl has a "single,
3420
// public, non-virtual base at offset zero (i.e. the derived class is dynamic
3421
// iff the base is)", according to Itanium C++ ABI, 2.9.5p6b.
3422
1.97k
static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
3423
  // Check the number of bases.
3424
1.97k
  if (RD->getNumBases() != 1)
3425
422
    return false;
3426
3427
  // Get the base.
3428
1.55k
  CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
3429
3430
  // Check that the base is not virtual.
3431
1.55k
  if (Base->isVirtual())
3432
394
    return false;
3433
3434
  // Check that the base is public.
3435
1.16k
  if (Base->getAccessSpecifier() != AS_public)
3436
50
    return false;
3437
3438
  // Check that the class is dynamic iff the base is.
3439
1.11k
  auto *BaseDecl =
3440
1.11k
      cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3441
1.11k
  if (!BaseDecl->isEmpty() &&
3442
1.11k
      
BaseDecl->isDynamicClass() != RD->isDynamicClass()1.06k
)
3443
6
    return false;
3444
3445
1.10k
  return true;
3446
1.11k
}
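To make the classification concrete, the hierarchies below illustrate (with placeholder names, under the checks above and the Record case of BuildVTablePointer that follows) which abi::*_type_info class ends up being selected.

struct Base { virtual ~Base(); };
struct Single : Base { };           // one public, non-virtual base at
                                    // offset zero -> __si_class_type_info
struct Mixin { virtual void f(); };
struct Multi : Base, Mixin { };     // fails the single-base check
                                    //   -> __vmi_class_type_info
struct Plain { int x; };            // no bases -> __class_type_info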
3447
3448
3.04k
void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
3449
  // abi::__class_type_info.
3450
3.04k
  static const char * const ClassTypeInfo =
3451
3.04k
    "_ZTVN10__cxxabiv117__class_type_infoE";
3452
  // abi::__si_class_type_info.
3453
3.04k
  static const char * const SIClassTypeInfo =
3454
3.04k
    "_ZTVN10__cxxabiv120__si_class_type_infoE";
3455
  // abi::__vmi_class_type_info.
3456
3.04k
  static const char * const VMIClassTypeInfo =
3457
3.04k
    "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
3458
3459
3.04k
  const char *VTableName = nullptr;
3460
3461
3.04k
  switch (Ty->getTypeClass()) {
3462
0
#define TYPE(Class, Base)
3463
0
#define ABSTRACT_TYPE(Class, Base)
3464
0
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3465
0
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3466
0
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
3467
0
#include "clang/AST/TypeNodes.inc"
3468
0
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3469
3470
0
  case Type::LValueReference:
3471
0
  case Type::RValueReference:
3472
0
    llvm_unreachable("References shouldn't get here");
3473
3474
0
  case Type::Auto:
3475
0
  case Type::DeducedTemplateSpecialization:
3476
0
    llvm_unreachable("Undeduced type shouldn't get here");
3477
3478
0
  case Type::Pipe:
3479
0
    llvm_unreachable("Pipe types shouldn't get here");
3480
3481
276
  case Type::Builtin:
3482
288
  case Type::BitInt:
3483
  // GCC treats vector and complex types as fundamental types.
3484
356
  case Type::Vector:
3485
356
  case Type::ExtVector:
3486
356
  case Type::ConstantMatrix:
3487
356
  case Type::Complex:
3488
356
  case Type::Atomic:
3489
  // FIXME: GCC treats block pointers as fundamental types?!
3490
356
  case Type::BlockPointer:
3491
    // abi::__fundamental_type_info.
3492
356
    VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
3493
356
    break;
3494
3495
5
  case Type::ConstantArray:
3496
5
  case Type::IncompleteArray:
3497
5
  case Type::VariableArray:
3498
    // abi::__array_type_info.
3499
5
    VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
3500
5
    break;
3501
3502
0
  case Type::FunctionNoProto:
3503
85
  case Type::FunctionProto:
3504
    // abi::__function_type_info.
3505
85
    VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
3506
85
    break;
3507
3508
2
  case Type::Enum:
3509
    // abi::__enum_type_info.
3510
2
    VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
3511
2
    break;
3512
3513
1.98k
  case Type::Record: {
3514
1.98k
    const CXXRecordDecl *RD =
3515
1.98k
      cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3516
3517
1.98k
    if (!RD->hasDefinition() || !RD->getNumBases()) {
3518
999
      VTableName = ClassTypeInfo;
3519
999
    } else if (CanUseSingleInheritance(RD)) {
3520
553
      VTableName = SIClassTypeInfo;
3521
553
    } else {
3522
436
      VTableName = VMIClassTypeInfo;
3523
436
    }
3524
3525
1.98k
    break;
3526
0
  }
3527
3528
7
  case Type::ObjCObject:
3529
    // Ignore protocol qualifiers.
3530
7
    Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();
3531
3532
    // Handle id and Class.
3533
7
    if (isa<BuiltinType>(Ty)) {
3534
6
      VTableName = ClassTypeInfo;
3535
6
      break;
3536
6
    }
3537
3538
1
    assert(isa<ObjCInterfaceType>(Ty));
3539
1
    LLVM_FALLTHROUGH;
3540
3541
3
  case Type::ObjCInterface:
3542
3
    if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
3543
1
      VTableName = SIClassTypeInfo;
3544
2
    } else {
3545
2
      VTableName = ClassTypeInfo;
3546
2
    }
3547
3
    break;
3548
3549
8
  case Type::ObjCObjectPointer:
3550
580
  case Type::Pointer:
3551
    // abi::__pointer_type_info.
3552
580
    VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
3553
580
    break;
3554
3555
21
  case Type::MemberPointer:
3556
    // abi::__pointer_to_member_type_info.
3557
21
    VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
3558
21
    break;
3559
3.04k
  }
3560
3561
3.04k
  llvm::Constant *VTable = nullptr;
3562
3563
  // Check if the alias exists. If it doesn't, then get or create the global.
3564
3.04k
  if (CGM.getItaniumVTableContext().isRelativeLayout())
3565
48
    VTable = CGM.getModule().getNamedAlias(VTableName);
3566
3.04k
  if (!VTable)
3567
3.04k
    VTable = CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
3568
3569
3.04k
  CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));
3570
3571
3.04k
  llvm::Type *PtrDiffTy =
3572
3.04k
      CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
3573
3574
  // The vtable address point is 2.
3575
3.04k
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
3576
    // The vtable address point is 8 bytes after its start:
3577
    // 4 for the offset to top + 4 for the relative offset to rtti.
3578
48
    llvm::Constant *Eight = llvm::ConstantInt::get(CGM.Int32Ty, 8);
3579
48
    VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
3580
48
    VTable =
3581
48
        llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8Ty, VTable, Eight);
3582
2.99k
  } else {
3583
2.99k
    llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
3584
2.99k
    VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable,
3585
2.99k
                                                          Two);
3586
2.99k
  }
3587
3.04k
  VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
3588
3589
3.04k
  Fields.push_back(VTable);
3590
3.04k
}
3591
3592
/// Return the linkage that the type info and type info name constants
3593
/// should have for the given type.
3594
static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
3595
2.29k
                                                             QualType Ty) {
3596
  // Itanium C++ ABI 2.9.5p7:
3597
  //   In addition, it and all of the intermediate abi::__pointer_type_info
3598
  //   structs in the chain down to the abi::__class_type_info for the
3599
  //   incomplete class type must be prevented from resolving to the
3600
  //   corresponding type_info structs for the complete class type, possibly
3601
  //   by making them local static objects. Finally, a dummy class RTTI is
3602
  //   generated for the incomplete type that will not resolve to the final
3603
  //   complete class RTTI (because the latter need not exist), possibly by
3604
  //   making it a local static object.
3605
2.29k
  if (ContainsIncompleteClassType(Ty))
3606
61
    return llvm::GlobalValue::InternalLinkage;
3607
3608
2.23k
  switch (Ty->getLinkage()) {
3609
25
  case NoLinkage:
3610
100
  case InternalLinkage:
3611
111
  case UniqueExternalLinkage:
3612
111
    return llvm::GlobalValue::InternalLinkage;
3613
3614
10
  case VisibleNoLinkage:
3615
10
  case ModuleInternalLinkage:
3616
10
  case ModuleLinkage:
3617
2.12k
  case ExternalLinkage:
3618
    // RTTI is not enabled, which means that this type info struct is going
3619
    // to be used for exception handling. Give it linkonce_odr linkage.
3620
2.12k
    if (!CGM.getLangOpts().RTTI)
3621
7
      return llvm::GlobalValue::LinkOnceODRLinkage;
3622
3623
2.11k
    if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
3624
1.87k
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
3625
1.87k
      if (RD->hasAttr<WeakAttr>())
3626
5
        return llvm::GlobalValue::WeakODRLinkage;
3627
1.86k
      if (CGM.getTriple().isWindowsItaniumEnvironment())
3628
20
        if (RD->hasAttr<DLLImportAttr>() &&
3629
20
            ShouldUseExternalRTTIDescriptor(CGM, Ty))
3630
0
          return llvm::GlobalValue::ExternalLinkage;
3631
      // MinGW always uses LinkOnceODRLinkage for type info.
3632
1.86k
      if (RD->isDynamicClass() &&
3633
1.86k
          !CGM.getContext()
3634
1.60k
               .getTargetInfo()
3635
1.60k
               .getTriple()
3636
1.60k
               .isWindowsGNUEnvironment())
3637
1.52k
        return CGM.getVTableLinkage(RD);
3638
1.86k
    }
3639
3640
592
    return llvm::GlobalValue::LinkOnceODRLinkage;
3641
2.23k
  }
3642
3643
0
  llvm_unreachable("Invalid linkage!");
3644
0
}
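As an illustration, here are hypothetical types (not from this file) and the linkage the function above would pick for their type_info:

// Illustrative only; exact answers also depend on target and language options.
struct Incomplete;                        // typeid(Incomplete*): internal linkage
                                          //   (the chain contains an incomplete class)
namespace { struct TULocal {}; }          // typeid(TULocal): internal linkage
                                          //   (the type itself has internal linkage)
struct Dynamic { virtual ~Dynamic(); };   // typeid(Dynamic): follows
                                          //   CGM.getVTableLinkage(Dynamic)
// Other externally-visible cases (e.g. typeid(int) when it is emitted at all)
// fall through to linkonce_odr, or weak_odr for classes marked with
// __attribute__((weak)).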
3645
3646
5.27k
llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
3647
  // We want to operate on the canonical type.
3648
5.27k
  Ty = Ty.getCanonicalType();
3649
3650
  // Check if we've already emitted an RTTI descriptor for this type.
3651
5.27k
  SmallString<256> Name;
3652
5.27k
  llvm::raw_svector_ostream Out(Name);
3653
5.27k
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3654
3655
5.27k
  llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
3656
5.27k
  if (OldGV && !OldGV->isDeclaration()) {
3657
1.65k
    assert(!OldGV->hasAvailableExternallyLinkage() &&
3658
1.65k
           "available_externally typeinfos not yet implemented");
3659
3660
0
    return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
3661
1.65k
  }
3662
3663
  // Check if there is already an external RTTI descriptor for this type.
3664
3.61k
  if (IsStandardLibraryRTTIDescriptor(Ty) ||
3665
3.61k
      ShouldUseExternalRTTIDescriptor(CGM, Ty))
3666
1.32k
    return GetAddrOfExternalRTTIDescriptor(Ty);
3667
3668
  // Emit the standard library with external linkage.
3669
2.29k
  llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);
3670
3671
  // Give the type_info object and name the formal visibility of the
3672
  // type itself.
3673
2.29k
  llvm::GlobalValue::VisibilityTypes llvmVisibility;
3674
2.29k
  if (llvm::GlobalValue::isLocalLinkage(Linkage))
3675
    // If the linkage is local, only default visibility makes sense.
3676
174
    llvmVisibility = llvm::GlobalValue::DefaultVisibility;
3677
2.12k
  else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) ==
3678
2.12k
           ItaniumCXXABI::RUK_NonUniqueHidden)
3679
6
    llvmVisibility = llvm::GlobalValue::HiddenVisibility;
3680
2.11k
  else
3681
2.11k
    llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());
3682
3683
2.29k
  llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
3684
2.29k
      llvm::GlobalValue::DefaultStorageClass;
3685
2.29k
  if (auto RD = Ty->getAsCXXRecordDecl()) {
3686
1.98k
    if ((CGM.getTriple().isWindowsItaniumEnvironment() &&
3687
1.98k
         RD->hasAttr<DLLExportAttr>()) ||
3688
1.98k
        (CGM.shouldMapVisibilityToDLLExport(RD) &&
3689
1.98k
         !llvm::GlobalValue::isLocalLinkage(Linkage) &&
3690
1.98k
         llvmVisibility == llvm::GlobalValue::DefaultVisibility))
3691
15
      DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
3692
1.98k
  }
3693
2.29k
  return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass);
3694
3.61k
}
3695
3696
llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
3697
      QualType Ty,
3698
      llvm::GlobalVariable::LinkageTypes Linkage,
3699
      llvm::GlobalValue::VisibilityTypes Visibility,
3700
3.04k
      llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
3701
  // Add the vtable pointer.
3702
3.04k
  BuildVTablePointer(cast<Type>(Ty));
3703
3704
  // And the name.
3705
3.04k
  llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
3706
3.04k
  llvm::Constant *TypeNameField;
3707
3708
  // If we're supposed to demote the visibility, be sure to set a flag
3709
  // to use a string comparison for type_info comparisons.
3710
3.04k
  ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
3711
3.04k
      CXXABI.classifyRTTIUniqueness(Ty, Linkage);
3712
3.04k
  if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
3713
    // The flag is the sign bit, which on ARM64 is defined to be clear
3714
    // for global pointers.  This is very ARM64-specific.
3715
8
    TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
3716
8
    llvm::Constant *flag =
3717
8
        llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
3718
8
    TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
3719
8
    TypeNameField =
3720
8
        llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
3721
3.03k
  } else {
3722
3.03k
    TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
3723
3.03k
  }
3724
3.04k
  Fields.push_back(TypeNameField);
3725
3726
3.04k
  switch (Ty->getTypeClass()) {
3727
0
#define TYPE(Class, Base)
3728
0
#define ABSTRACT_TYPE(Class, Base)
3729
0
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3730
0
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3731
0
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
3732
0
#include "clang/AST/TypeNodes.inc"
3733
0
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3734
3735
  // GCC treats vector types as fundamental types.
3736
276
  case Type::Builtin:
3737
344
  case Type::Vector:
3738
344
  case Type::ExtVector:
3739
344
  case Type::ConstantMatrix:
3740
344
  case Type::Complex:
3741
344
  case Type::BlockPointer:
3742
    // Itanium C++ ABI 2.9.5p4:
3743
    // abi::__fundamental_type_info adds no data members to std::type_info.
3744
344
    break;
3745
3746
0
  case Type::LValueReference:
3747
0
  case Type::RValueReference:
3748
0
    llvm_unreachable("References shouldn't get here");
3749
3750
0
  case Type::Auto:
3751
0
  case Type::DeducedTemplateSpecialization:
3752
0
    llvm_unreachable("Undeduced type shouldn't get here");
3753
3754
0
  case Type::Pipe:
3755
0
    break;
3756
3757
12
  case Type::BitInt:
3758
12
    break;
3759
3760
5
  case Type::ConstantArray:
3761
5
  case Type::IncompleteArray:
3762
5
  case Type::VariableArray:
3763
    // Itanium C++ ABI 2.9.5p5:
3764
    // abi::__array_type_info adds no data members to std::type_info.
3765
5
    break;
3766
3767
0
  case Type::FunctionNoProto:
3768
85
  case Type::FunctionProto:
3769
    // Itanium C++ ABI 2.9.5p5:
3770
    // abi::__function_type_info adds no data members to std::type_info.
3771
85
    break;
3772
3773
2
  case Type::Enum:
3774
    // Itanium C++ ABI 2.9.5p5:
3775
    // abi::__enum_type_info adds no data members to std::type_info.
3776
2
    break;
3777
3778
1.98k
  case Type::Record: {
3779
1.98k
    const CXXRecordDecl *RD =
3780
1.98k
      cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3781
1.98k
    if (!RD->hasDefinition() || !RD->getNumBases()) {
3782
      // We don't need to emit any fields.
3783
999
      break;
3784
999
    }
3785
3786
989
    if (CanUseSingleInheritance(RD))
3787
553
      BuildSIClassTypeInfo(RD);
3788
436
    else
3789
436
      BuildVMIClassTypeInfo(RD);
3790
3791
989
    break;
3792
1.98k
  }
3793
3794
7
  case Type::ObjCObject:
3795
9
  case Type::ObjCInterface:
3796
9
    BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
3797
9
    break;
3798
3799
8
  case Type::ObjCObjectPointer:
3800
8
    BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
3801
8
    break;
3802
3803
572
  case Type::Pointer:
3804
572
    BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
3805
572
    break;
3806
3807
21
  case Type::MemberPointer:
3808
21
    BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
3809
21
    break;
3810
3811
0
  case Type::Atomic:
3812
    // No fields, at least for the moment.
3813
0
    break;
3814
3.04k
  }
3815
3816
3.04k
  llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);
3817
3818
3.04k
  SmallString<256> Name;
3819
3.04k
  llvm::raw_svector_ostream Out(Name);
3820
3.04k
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3821
3.04k
  llvm::Module &M = CGM.getModule();
3822
3.04k
  llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
3823
3.04k
  llvm::GlobalVariable *GV =
3824
3.04k
      new llvm::GlobalVariable(M, Init->getType(),
3825
3.04k
                               /*isConstant=*/true, Linkage, Init, Name);
3826
3827
  // Export the typeinfo in the same circumstances as the vtable is exported.
3828
3.04k
  auto GVDLLStorageClass = DLLStorageClass;
3829
3.04k
  if (CGM.getTarget().hasPS4DLLImportExport()) {
3830
21
    if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3831
21
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
3832
21
      if (RD->hasAttr<DLLExportAttr>() ||
3833
21
          CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD)) {
3834
10
        GVDLLStorageClass = llvm::GlobalVariable::DLLExportStorageClass;
3835
10
      }
3836
21
    }
3837
21
  }
3838
3839
  // If there's already an old global variable, replace it with the new one.
3840
3.04k
  if (OldGV) {
3841
20
    GV->takeName(OldGV);
3842
20
    llvm::Constant *NewPtr =
3843
20
      llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
3844
20
    OldGV->replaceAllUsesWith(NewPtr);
3845
20
    OldGV->eraseFromParent();
3846
20
  }
3847
3848
3.04k
  if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
3849
667
    GV->setComdat(M.getOrInsertComdat(GV->getName()));
3850
3851
3.04k
  CharUnits Align =
3852
3.04k
      CGM.getContext().toCharUnitsFromBits(CGM.getTarget().getPointerAlign(0));
3853
3.04k
  GV->setAlignment(Align.getAsAlign());
3854
3855
  // The Itanium ABI specifies that type_info objects must be globally
3856
  // unique, with one exception: if the type is an incomplete class
3857
  // type or a (possibly indirect) pointer to one.  That exception
3858
  // affects the general case of comparing type_info objects produced
3859
  // by the typeid operator, which is why the comparison operators on
3860
  // std::type_info generally use the type_info name pointers instead
3861
  // of the object addresses.  However, the language's built-in uses
3862
  // of RTTI generally require class types to be complete, even when
3863
  // manipulating pointers to those class types.  This allows the
3864
  // implementation of dynamic_cast to rely on address equality tests,
3865
  // which is much faster.
3866
3867
  // All of this is to say that it's important that both the type_info
3868
  // object and the type_info name be uniqued when weakly emitted.
3869
3870
3.04k
  TypeName->setVisibility(Visibility);
3871
3.04k
  CGM.setDSOLocal(TypeName);
3872
3873
3.04k
  GV->setVisibility(Visibility);
3874
3.04k
  CGM.setDSOLocal(GV);
3875
3876
3.04k
  TypeName->setDLLStorageClass(DLLStorageClass);
3877
3.04k
  GV->setDLLStorageClass(CGM.getTarget().hasPS4DLLImportExport()
3878
3.04k
                             ? GVDLLStorageClass
3879
3.04k
                             : DLLStorageClass);
3880
3881
3.04k
  TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
3882
3.04k
  GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);
3883
3884
3.04k
  return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
3885
3.04k
}
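When RTTIUniqueness is not RUK_Unique, the code above hides a flag in the sign bit of the __name pointer (an ARM64-specific convention). A minimal sketch of how a consumer could honor that bit, assuming the libc++abi-style rule that a set bit forces string comparison; the helper below is hypothetical and not part of clang or libc++abi:

#include <cstdint>
#include <cstring>

// Hypothetical helper: compare two type_info name pointers. Bit 63 set means the
// RTTI may have been emitted in several images, so fall back to comparing the
// mangled-name strings instead of the pointers.
static bool typeNamesEqual(const char *LHS, const char *RHS) {
  const uintptr_t NonUniqueBit = uintptr_t(1) << 63;
  auto stripped = [&](const char *P) {
    return reinterpret_cast<const char *>(
        reinterpret_cast<uintptr_t>(P) & ~NonUniqueBit);
  };
  bool AnyNonUnique = (reinterpret_cast<uintptr_t>(LHS) & NonUniqueBit) ||
                      (reinterpret_cast<uintptr_t>(RHS) & NonUniqueBit);
  if (AnyNonUnique)
    return std::strcmp(stripped(LHS), stripped(RHS)) == 0;
  return LHS == RHS;   // unique RTTI: pointer identity is enough
}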
3886
3887
/// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
3888
/// for the given Objective-C object type.
3889
9
void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
3890
  // Drop qualifiers.
3891
9
  const Type *T = OT->getBaseType().getTypePtr();
3892
9
  assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
3893
3894
  // The builtin types are abi::__class_type_infos and don't require
3895
  // extra fields.
3896
9
  if (isa<BuiltinType>(T)) return;
3897
3898
3
  ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
3899
3
  ObjCInterfaceDecl *Super = Class->getSuperClass();
3900
3901
  // Root classes are also __class_type_info.
3902
3
  if (!Super) return;
3903
3904
1
  QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
3905
3906
  // Everything else is single inheritance.
3907
1
  llvm::Constant *BaseTypeInfo =
3908
1
      ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
3909
1
  Fields.push_back(BaseTypeInfo);
3910
1
}
3911
3912
/// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3913
/// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
3914
553
void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
3915
  // Itanium C++ ABI 2.9.5p6b:
3916
  // It adds to abi::__class_type_info a single member pointing to the
3917
  // type_info structure for the base type,
3918
553
  llvm::Constant *BaseTypeInfo =
3919
553
    ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
3920
553
  Fields.push_back(BaseTypeInfo);
3921
553
}
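A hypothetical class pair showing the single-inheritance case this function handles:

// Illustration only: Derived has one public, non-virtual base at offset zero, so
// CanUseSingleInheritance holds and typeid(Derived) becomes an
// abi::__si_class_type_info whose single extra field is &typeid(Base).
struct Base { virtual ~Base(); };
struct Derived : Base { };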
3922
3923
namespace {
3924
  /// SeenBases - Contains virtual and non-virtual bases seen when traversing
3925
  /// a class hierarchy.
3926
  struct SeenBases {
3927
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
3928
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
3929
  };
3930
}
3931
3932
/// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
3933
/// abi::__vmi_class_type_info.
3934
///
3935
static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
3936
1.57k
                                             SeenBases &Bases) {
3937
3938
1.57k
  unsigned Flags = 0;
3939
3940
1.57k
  auto *BaseDecl =
3941
1.57k
      cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3942
3943
1.57k
  if (Base->isVirtual()) {
3944
    // Mark the virtual base as seen.
3945
817
    if (!Bases.VirtualBases.insert(BaseDecl).second) {
3946
      // If this virtual base has been seen before, then the class is diamond
3947
      // shaped.
3948
410
      Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
3949
410
    } else {
3950
407
      if (Bases.NonVirtualBases.count(BaseDecl))
3951
19
        Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3952
407
    }
3953
817
  } else {
3954
    // Mark the non-virtual base as seen.
3955
755
    if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
3956
      // If this non-virtual base has been seen before, then the class has non-
3957
      // diamond shaped repeated inheritance.
3958
249
      Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3959
506
    } else {
3960
506
      if (Bases.VirtualBases.count(BaseDecl))
3961
26
        Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3962
506
    }
3963
755
  }
3964
3965
  // Walk all bases.
3966
1.57k
  for (const auto &I : BaseDecl->bases())
3967
886
    Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3968
3969
1.57k
  return Flags;
3970
1.57k
}
3971
3972
436
static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
3973
436
  unsigned Flags = 0;
3974
436
  SeenBases Bases;
3975
3976
  // Walk all bases.
3977
436
  for (const auto &I : RD->bases())
3978
686
    Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3979
3980
436
  return Flags;
3981
436
}
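A hypothetical diamond hierarchy showing how these flags come out:

// Illustration only: walking D's bases reaches the virtual base A once through B
// and again through C; the second visit fails the VirtualBases.insert() above and
// sets VMI_DiamondShaped. A repeated *non-virtual* base would instead set
// VMI_NonDiamondRepeat.
struct A { };
struct B : virtual A { };
struct C : virtual A { };
struct D : B, C { };   // __flags gets VMI_DiamondShaped; __base_count is 2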
3982
3983
/// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
3984
/// classes with bases that do not satisfy the abi::__si_class_type_info
3985
/// constraints, according to the Itanium C++ ABI, 2.9.5p6c.
3986
436
void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
3987
436
  llvm::Type *UnsignedIntLTy =
3988
436
    CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3989
3990
  // Itanium C++ ABI 2.9.5p6c:
3991
  //   __flags is a word with flags describing details about the class
3992
  //   structure, which may be referenced by using the __flags_masks
3993
  //   enumeration. These flags refer to both direct and indirect bases.
3994
436
  unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
3995
436
  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3996
3997
  // Itanium C++ ABI 2.9.5p6c:
3998
  //   __base_count is a word with the number of direct proper base class
3999
  //   descriptions that follow.
4000
436
  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
4001
4002
436
  if (!RD->getNumBases())
4003
0
    return;
4004
4005
  // Now add the base class descriptions.
4006
4007
  // Itanium C++ ABI 2.9.5p6c:
4008
  //   __base_info[] is an array of base class descriptions -- one for every
4009
  //   direct proper base. Each description is of the type:
4010
  //
4011
  //   struct abi::__base_class_type_info {
4012
  //   public:
4013
  //     const __class_type_info *__base_type;
4014
  //     long __offset_flags;
4015
  //
4016
  //     enum __offset_flags_masks {
4017
  //       __virtual_mask = 0x1,
4018
  //       __public_mask = 0x2,
4019
  //       __offset_shift = 8
4020
  //     };
4021
  //   };
4022
4023
  // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
4024
  // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
4025
  // LLP64 platforms.
4026
  // FIXME: Consider updating libc++abi to match, and extend this logic to all
4027
  // LLP64 platforms.
4028
436
  QualType OffsetFlagsTy = CGM.getContext().LongTy;
4029
436
  const TargetInfo &TI = CGM.getContext().getTargetInfo();
4030
436
  if (TI.getTriple().isOSCygMing() && TI.getPointerWidth(0) > TI.getLongWidth())
4031
6
    OffsetFlagsTy = CGM.getContext().LongLongTy;
4032
436
  llvm::Type *OffsetFlagsLTy =
4033
436
      CGM.getTypes().ConvertType(OffsetFlagsTy);
4034
4035
686
  for (const auto &Base : RD->bases()) {
4036
    // The __base_type member points to the RTTI for the base type.
4037
686
    Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
4038
4039
686
    auto *BaseDecl =
4040
686
        cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
4041
4042
686
    int64_t OffsetFlags = 0;
4043
4044
    // All but the lower 8 bits of __offset_flags are a signed offset.
4045
    // For a non-virtual base, this is the offset in the object of the base
4046
    // subobject. For a virtual base, this is the offset in the virtual table of
4047
    // the virtual base offset for the virtual base referenced (negative).
4048
686
    CharUnits Offset;
4049
686
    if (Base.isVirtual())
4050
291
      Offset =
4051
291
        CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
4052
395
    else {
4053
395
      const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
4054
395
      Offset = Layout.getBaseClassOffset(BaseDecl);
4055
395
    };
4056
4057
686
    OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
4058
4059
    // The low-order byte of __offset_flags contains flags, as given by the
4060
    // masks from the enumeration __offset_flags_masks.
4061
686
    if (Base.isVirtual())
4062
291
      OffsetFlags |= BCTI_Virtual;
4063
686
    if (Base.getAccessSpecifier() == AS_public)
4064
625
      OffsetFlags |= BCTI_Public;
4065
4066
686
    Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
4067
686
  }
4068
436
}
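A minimal standalone sketch of the __offset_flags packing performed in the loop above, using the BCTI_* masks it references (not clang code):

#include <cstdint>

// Mirrors the packing above: the offset (or, for a virtual base, the negative
// vbase-offset-offset in the vtable) lives in all but the low 8 bits; the low
// byte carries the flags.
enum : int64_t { BCTI_Virtual = 0x1, BCTI_Public = 0x2 };

static int64_t makeOffsetFlags(int64_t Offset, bool IsVirtual, bool IsPublic) {
  int64_t V = int64_t(uint64_t(Offset) << 8);
  if (IsVirtual) V |= BCTI_Virtual;
  if (IsPublic)  V |= BCTI_Public;
  return V;
}
// e.g. a public virtual base whose vbase-offset-offset is -24 yields
// makeOffsetFlags(-24, true, true) == (-24 << 8) | 0x3.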
4069
4070
/// Compute the flags for a __pbase_type_info, and remove the corresponding
4071
/// pieces from \p Type.
4072
601
static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
4073
601
  unsigned Flags = 0;
4074
4075
601
  if (Type.isConstQualified())
4076
254
    Flags |= ItaniumRTTIBuilder::PTI_Const;
4077
601
  if (Type.isVolatileQualified())
4078
4
    Flags |= ItaniumRTTIBuilder::PTI_Volatile;
4079
601
  if (Type.isRestrictQualified())
4080
0
    Flags |= ItaniumRTTIBuilder::PTI_Restrict;
4081
601
  Type = Type.getUnqualifiedType();
4082
4083
  // Itanium C++ ABI 2.9.5p7:
4084
  //   When the abi::__pbase_type_info is for a direct or indirect pointer to an
4085
  //   incomplete class type, the incomplete target type flag is set.
4086
601
  if (ContainsIncompleteClassType(Type))
4087
43
    Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
4088
4089
601
  if (auto *Proto = Type->getAs<FunctionProtoType>()) {
4090
19
    if (Proto->isNothrow()) {
4091
2
      Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
4092
2
      Type = Ctx.getFunctionTypeWithExceptionSpec(Type, EST_None);
4093
2
    }
4094
19
  }
4095
4096
601
  return Flags;
4097
601
}
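A hedged example of what the function above computes. The PTI_* values below are the abi::__pbase_type_info masks as I understand them; treat them as an assumption rather than something quoted from this file:

// Hypothetical pointee: `const volatile Incomplete *` where Incomplete is only
// forward-declared. extractPBaseFlags would return
//   PTI_Const | PTI_Volatile | PTI_Incomplete
// and strip the cv-qualifiers so __pointee names the unqualified type.
enum : unsigned {
  PTI_Const = 0x1, PTI_Volatile = 0x2, PTI_Restrict = 0x4,
  PTI_Incomplete = 0x8, PTI_ContainingClassIncomplete = 0x10,
  PTI_Noexcept = 0x40,   // assumed values, mirroring abi::__pbase_type_info
};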
4098
4099
/// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
4100
/// used for pointer types.
4101
580
void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
4102
  // Itanium C++ ABI 2.9.5p7:
4103
  //   __flags is a flag word describing the cv-qualification and other
4104
  //   attributes of the type pointed to
4105
580
  unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4106
4107
580
  llvm::Type *UnsignedIntLTy =
4108
580
    CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4109
580
  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4110
4111
  // Itanium C++ ABI 2.9.5p7:
4112
  //  __pointee is a pointer to the std::type_info derivation for the
4113
  //  unqualified type being pointed to.
4114
580
  llvm::Constant *PointeeTypeInfo =
4115
580
      ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4116
580
  Fields.push_back(PointeeTypeInfo);
4117
580
}
4118
4119
/// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
4120
/// struct, used for member pointer types.
4121
void
4122
21
ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
4123
21
  QualType PointeeTy = Ty->getPointeeType();
4124
4125
  // Itanium C++ ABI 2.9.5p7:
4126
  //   __flags is a flag word describing the cv-qualification and other
4127
  //   attributes of the type pointed to.
4128
21
  unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4129
4130
21
  const RecordType *ClassType = cast<RecordType>(Ty->getClass());
4131
21
  if (IsIncompleteClassType(ClassType))
4132
8
    Flags |= PTI_ContainingClassIncomplete;
4133
4134
21
  llvm::Type *UnsignedIntLTy =
4135
21
    CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4136
21
  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4137
4138
  // Itanium C++ ABI 2.9.5p7:
4139
  //   __pointee is a pointer to the std::type_info derivation for the
4140
  //   unqualified type being pointed to.
4141
21
  llvm::Constant *PointeeTypeInfo =
4142
21
      ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4143
21
  Fields.push_back(PointeeTypeInfo);
4144
4145
  // Itanium C++ ABI 2.9.5p9:
4146
  //   __context is a pointer to an abi::__class_type_info corresponding to the
4147
  //   class type containing the member pointed to
4148
  //   (e.g., the "A" in "int A::*").
4149
21
  Fields.push_back(
4150
21
      ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
4151
21
}
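A hypothetical member-pointer type and the fields the function above would emit for it:

// Illustration only. For `int A::*` with A complete, the
// abi::__pointer_to_member_type_info gets, in order:
//   __flags   = 0               (no cv-qualifiers, containing class complete)
//   __pointee = &typeid(int)
//   __context = &typeid(A)
// If A were only forward-declared, __flags would also carry
// PTI_ContainingClassIncomplete.
struct A { int m; };
using PtrToMember = int A::*;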
4152
4153
3.41k
llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
4154
3.41k
  return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
4155
3.41k
}
4156
4157
10
void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
4158
  // Types added here must also be added to TypeInfoIsInStandardLibrary.
4159
10
  QualType FundamentalTypes[] = {
4160
10
      getContext().VoidTy,             getContext().NullPtrTy,
4161
10
      getContext().BoolTy,             getContext().WCharTy,
4162
10
      getContext().CharTy,             getContext().UnsignedCharTy,
4163
10
      getContext().SignedCharTy,       getContext().ShortTy,
4164
10
      getContext().UnsignedShortTy,    getContext().IntTy,
4165
10
      getContext().UnsignedIntTy,      getContext().LongTy,
4166
10
      getContext().UnsignedLongTy,     getContext().LongLongTy,
4167
10
      getContext().UnsignedLongLongTy, getContext().Int128Ty,
4168
10
      getContext().UnsignedInt128Ty,   getContext().HalfTy,
4169
10
      getContext().FloatTy,            getContext().DoubleTy,
4170
10
      getContext().LongDoubleTy,       getContext().Float128Ty,
4171
10
      getContext().Char8Ty,            getContext().Char16Ty,
4172
10
      getContext().Char32Ty
4173
10
  };
4174
10
  llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
4175
10
      RD->hasAttr<DLLExportAttr>() || CGM.shouldMapVisibilityToDLLExport(RD)
4176
10
          ? llvm::GlobalValue::DLLExportStorageClass
4177
10
          : llvm::GlobalValue::DefaultStorageClass;
4178
10
  llvm::GlobalValue::VisibilityTypes Visibility =
4179
10
      CodeGenModule::GetLLVMVisibility(RD->getVisibility());
4180
250
  for (const QualType &FundamentalType : FundamentalTypes) {
4181
250
    QualType PointerType = getContext().getPointerType(FundamentalType);
4182
250
    QualType PointerTypeConst = getContext().getPointerType(
4183
250
        FundamentalType.withConst());
4184
250
    for (QualType Type : {FundamentalType, PointerType, PointerTypeConst})
4185
750
      ItaniumRTTIBuilder(*this).BuildTypeInfo(
4186
750
          Type, llvm::GlobalValue::ExternalLinkage,
4187
750
          Visibility, DLLStorageClass);
4188
250
  }
4189
10
}
4190
4191
/// What sort of uniqueness rules should we use for the RTTI for the
4192
/// given type?
4193
ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
4194
5.16k
    QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
4195
5.16k
  if (shouldRTTIBeUnique())
4196
5.12k
    return RUK_Unique;
4197
4198
  // It's only necessary for linkonce_odr or weak_odr linkage.
4199
40
  if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
4200
40
      Linkage != llvm::GlobalValue::WeakODRLinkage)
4201
16
    return RUK_Unique;
4202
4203
  // It's only necessary with default visibility.
4204
24
  if (CanTy->getVisibility() != DefaultVisibility)
4205
8
    return RUK_Unique;
4206
4207
  // If we're not required to publish this symbol, hide it.
4208
16
  if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
4209
12
    return RUK_NonUniqueHidden;
4210
4211
  // If we're required to publish this symbol, as we might be under an
4212
  // explicit instantiation, leave it with default visibility but
4213
  // enable string-comparisons.
4214
4
  assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
4215
0
  return RUK_NonUniqueVisible;
4216
16
}
4217
4218
// Find out how to codegen the complete destructor and constructor
4219
namespace {
4220
enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
4221
}
4222
static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
4223
55.2k
                                       const CXXMethodDecl *MD) {
4224
55.2k
  if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
4225
54.6k
    return StructorCodegen::Emit;
4226
4227
  // The complete and base structors are not equivalent if there are any virtual
4228
  // bases, so emit separate functions.
4229
522
  if (MD->getParent()->getNumVBases())
4230
33
    return StructorCodegen::Emit;
4231
4232
489
  GlobalDecl AliasDecl;
4233
489
  if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
4234
351
    AliasDecl = GlobalDecl(DD, Dtor_Complete);
4235
351
  } else {
4236
138
    const auto *CD = cast<CXXConstructorDecl>(MD);
4237
138
    AliasDecl = GlobalDecl(CD, Ctor_Complete);
4238
138
  }
4239
489
  llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4240
4241
489
  if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
4242
309
    return StructorCodegen::RAUW;
4243
4244
  // FIXME: Should we allow available_externally aliases?
4245
180
  if (!llvm::GlobalAlias::isValidLinkage(Linkage))
4246
0
    return StructorCodegen::RAUW;
4247
4248
180
  if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
4249
    // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
4250
33
    if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
4251
33
        CGM.getTarget().getTriple().isOSBinFormatWasm())
4252
22
      return StructorCodegen::COMDAT;
4253
11
    return StructorCodegen::Emit;
4254
33
  }
4255
4256
147
  return StructorCodegen::Alias;
4257
180
}
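Concretely, for a hypothetical class Foo compiled with the ctor/dtor-aliases codegen option, the four strategies map onto the Itanium constructor manglings roughly as follows (sketch, not something this file emits verbatim):

// Emit   : define _ZN3FooC1Ev (complete) and _ZN3FooC2Ev (base) separately.
// RAUW   : do not define _ZN3FooC1Ev; replace its uses with the _ZN3FooC2Ev
//          definition (chosen when the linkage is discardable-if-unused).
// Alias  : define _ZN3FooC1Ev as an llvm::GlobalAlias of _ZN3FooC2Ev.
// COMDAT : emit both into one comdat group keyed by the unified C5 mangling
//          (mangleCXXCtorComdat; D5 for destructors), ELF and wasm only.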
4258
4259
static void emitConstructorDestructorAlias(CodeGenModule &CGM,
4260
                                           GlobalDecl AliasDecl,
4261
81
                                           GlobalDecl TargetDecl) {
4262
81
  llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4263
4264
81
  StringRef MangledName = CGM.getMangledName(AliasDecl);
4265
81
  llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
4266
81
  if (Entry && !Entry->isDeclaration())
4267
0
    return;
4268
4269
81
  auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));
4270
4271
  // Create the alias with no name.
4272
81
  auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);
4273
4274
  // Constructors and destructors are always unnamed_addr.
4275
81
  Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
4276
4277
  // Switch any previous uses to the alias.
4278
81
  if (Entry) {
4279
2
    assert(Entry->getType() == Aliasee->getType() &&
4280
2
           "declaration exists with different type");
4281
0
    Alias->takeName(Entry);
4282
2
    Entry->replaceAllUsesWith(Alias);
4283
2
    Entry->eraseFromParent();
4284
79
  } else {
4285
79
    Alias->setName(MangledName);
4286
79
  }
4287
4288
  // Finally, set up the alias with its proper name and attributes.
4289
0
  CGM.SetCommonAttributes(AliasDecl, Alias);
4290
81
}
4291
4292
55.2k
void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) {
4293
55.2k
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());
4294
55.2k
  auto *CD = dyn_cast<CXXConstructorDecl>(MD);
4295
55.2k
  const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);
4296
4297
55.2k
  StructorCodegen CGType = getCodegenToUse(CGM, MD);
4298
4299
55.2k
  if (CD ? GD.getCtorType() == Ctor_Complete
4300
55.2k
         : GD.getDtorType() == Dtor_Complete) {
4301
24.0k
    GlobalDecl BaseDecl;
4302
24.0k
    if (CD)
4303
16.6k
      BaseDecl = GD.getWithCtorType(Ctor_Base);
4304
7.44k
    else
4305
7.44k
      BaseDecl = GD.getWithDtorType(Dtor_Base);
4306
4307
24.0k
    if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
4308
81
      emitConstructorDestructorAlias(CGM, GD, BaseDecl);
4309
81
      return;
4310
81
    }
4311
4312
24.0k
    if (CGType == StructorCodegen::RAUW) {
4313
128
      StringRef MangledName = CGM.getMangledName(GD);
4314
128
      auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
4315
128
      CGM.addReplacement(MangledName, Aliasee);
4316
128
      return;
4317
128
    }
4318
24.0k
  }
4319
4320
  // The base destructor is equivalent to the base destructor of its
4321
  // base class if there is exactly one non-virtual base class with a
4322
  // non-trivial destructor, there are no fields with a non-trivial
4323
  // destructor, and the body of the destructor is trivial.
4324
54.9k
  if (DD && GD.getDtorType() == Dtor_Base &&
4325
54.9k
      CGType != StructorCodegen::COMDAT &&
4326
54.9k
      !CGM.TryEmitBaseDestructorAsAlias(DD))
4327
34
    return;
4328
4329
  // FIXME: The deleting destructor is equivalent to the selected operator
4330
  // delete if:
4331
  //  * either the delete is a destroying operator delete or the destructor
4332
  //    would be trivial if it weren't virtual,
4333
  //  * the conversion from the 'this' parameter to the first parameter of the
4334
  //    destructor is equivalent to a bitcast,
4335
  //  * the destructor does not have an implicit "this" return, and
4336
  //  * the operator delete has the same calling convention and IR function type
4337
  //    as the destructor.
4338
  // In such cases we should try to emit the deleting dtor as an alias to the
4339
  // selected 'operator delete'.
4340
4341
54.9k
  llvm::Function *Fn = CGM.codegenCXXStructor(GD);
4342
4343
54.9k
  if (CGType == StructorCodegen::COMDAT) {
4344
12
    SmallString<256> Buffer;
4345
12
    llvm::raw_svector_ostream Out(Buffer);
4346
12
    if (DD)
4347
10
      getMangleContext().mangleCXXDtorComdat(DD, Out);
4348
2
    else
4349
2
      getMangleContext().mangleCXXCtorComdat(CD, Out);
4350
12
    llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
4351
12
    Fn->setComdat(C);
4352
54.9k
  } else {
4353
54.9k
    CGM.maybeSetTrivialComdat(*MD, *Fn);
4354
54.9k
  }
4355
54.9k
}
4356
4357
1.02k
static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) {
4358
  // void *__cxa_begin_catch(void*);
4359
1.02k
  llvm::FunctionType *FTy = llvm::FunctionType::get(
4360
1.02k
      CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4361
4362
1.02k
  return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
4363
1.02k
}
4364
4365
346
static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) {
4366
  // void __cxa_end_catch();
4367
346
  llvm::FunctionType *FTy =
4368
346
      llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
4369
4370
346
  return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
4371
346
}
4372
4373
6
static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) {
4374
  // void *__cxa_get_exception_ptr(void*);
4375
6
  llvm::FunctionType *FTy = llvm::FunctionType::get(
4376
6
      CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4377
4378
6
  return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
4379
6
}
4380
4381
namespace {
4382
  /// A cleanup to call __cxa_end_catch.  In many cases, the caught
4383
  /// exception type lets us state definitively that the thrown exception
4384
  /// type does not have a destructor.  In particular:
4385
  ///   - Catch-alls tell us nothing, so we have to conservatively
4386
  ///     assume that the thrown exception might have a destructor.
4387
  ///   - Catches by reference behave according to their base types.
4388
  ///   - Catches of non-record types will only trigger for exceptions
4389
  ///     of non-record types, which never have destructors.
4390
  ///   - Catches of record types can trigger for arbitrary subclasses
4391
  ///     of the caught type, so we have to assume the actual thrown
4392
  ///     exception type might have a throwing destructor, even if the
4393
  ///     caught type's destructor is trivial or nothrow.
4394
  struct CallEndCatch final : EHScopeStack::Cleanup {
4395
272
    CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
4396
    bool MightThrow;
4397
4398
346
    void Emit(CodeGenFunction &CGF, Flags flags) override {
4399
346
      if (!MightThrow) {
4400
68
        CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
4401
68
        return;
4402
68
      }
4403
4404
278
      CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
4405
278
    }
4406
  };
4407
}
4408
4409
/// Emits a call to __cxa_begin_catch and enters a cleanup to call
4410
/// __cxa_end_catch.
4411
///
4412
/// \param EndMightThrow - true if __cxa_end_catch might throw
4413
static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
4414
                                   llvm::Value *Exn,
4415
272
                                   bool EndMightThrow) {
4416
272
  llvm::CallInst *call =
4417
272
    CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);
4418
4419
272
  CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
4420
4421
272
  return call;
4422
272
}
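A rough source-level picture of the pairing this sets up (sketch; the real lowering goes through landing pads and the cleanup stack):

// For a handler `catch (T t) { ... }` the emitted shape is roughly:
//   void *adjusted = __cxa_begin_catch(exn);  // CallBeginCatch
//   /* initialize t from adjusted */          // InitCatchParam, below
//   /* handler body */
//   __cxa_end_catch();                        // CallEndCatch cleanup; runs on
//                                             // both normal and exceptional exits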
4423
4424
/// A "special initializer" callback for initializing a catch
4425
/// parameter during catch initialization.
4426
static void InitCatchParam(CodeGenFunction &CGF,
4427
                           const VarDecl &CatchParam,
4428
                           Address ParamAddr,
4429
86
                           SourceLocation Loc) {
4430
  // Load the exception from where the landing pad saved it.
4431
86
  llvm::Value *Exn = CGF.getExceptionFromSlot();
4432
4433
86
  CanQualType CatchType =
4434
86
    CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
4435
86
  llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
4436
4437
  // If we're catching by reference, we can just cast the object
4438
  // pointer to the appropriate pointer.
4439
86
  if (isa<ReferenceType>(CatchType)) {
4440
38
    QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
4441
38
    bool EndCatchMightThrow = CaughtType->isRecordType();
4442
4443
    // __cxa_begin_catch returns the adjusted object pointer.
4444
38
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);
4445
4446
    // We have no way to tell the personality function that we're
4447
    // catching by reference, so if we're catching a pointer,
4448
    // __cxa_begin_catch will actually return that pointer by value.
4449
38
    if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
4450
9
      QualType PointeeType = PT->getPointeeType();
4451
4452
      // When catching by reference, generally we should just ignore
4453
      // this by-value pointer and use the exception object instead.
4454
9
      if (!PointeeType->isRecordType()) {
4455
4456
        // Exn points to the struct _Unwind_Exception header, which
4457
        // we have to skip past in order to reach the exception data.
4458
7
        unsigned HeaderSize =
4459
7
          CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
4460
7
        AdjustedExn =
4461
7
            CGF.Builder.CreateConstGEP1_32(CGF.Int8Ty, Exn, HeaderSize);
4462
4463
      // However, if we're catching a pointer-to-record type that won't
4464
      // work, because the personality function might have adjusted
4465
      // the pointer.  There's actually no way for us to fully satisfy
4466
      // the language/ABI contract here:  we can't use Exn because it
4467
      // might have the wrong adjustment, but we can't use the by-value
4468
      // pointer because it's off by a level of abstraction.
4469
      //
4470
      // The current solution is to dump the adjusted pointer into an
4471
      // alloca, which breaks language semantics (because changing the
4472
      // pointer doesn't change the exception) but at least works.
4473
      // The better solution would be to filter out non-exact matches
4474
      // and rethrow them, but this is tricky because the rethrow
4475
      // really needs to be catchable by other sites at this landing
4476
      // pad.  The best solution is to fix the personality function.
4477
7
      } else {
4478
        // Pull the pointer for the reference type off.
4479
2
        llvm::Type *PtrTy = CGF.ConvertTypeForMem(CaughtType);
4480
4481
        // Create the temporary and write the adjusted pointer into it.
4482
2
        Address ExnPtrTmp =
4483
2
          CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
4484
2
        llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
4485
2
        CGF.Builder.CreateStore(Casted, ExnPtrTmp);
4486
4487
        // Bind the reference to the temporary.
4488
2
        AdjustedExn = ExnPtrTmp.getPointer();
4489
2
      }
4490
9
    }
4491
4492
38
    llvm::Value *ExnCast =
4493
38
      CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
4494
38
    CGF.Builder.CreateStore(ExnCast, ParamAddr);
4495
38
    return;
4496
38
  }
4497
4498
  // Scalars and complexes.
4499
48
  TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
4500
48
  if (TEK != TEK_Aggregate) {
4501
39
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);
4502
4503
    // If the catch type is a pointer type, __cxa_begin_catch returns
4504
    // the pointer by value.
4505
39
    if (CatchType->hasPointerRepresentation()) {
4506
8
      llvm::Value *CastExn =
4507
8
        CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");
4508
4509
8
      switch (CatchType.getQualifiers().getObjCLifetime()) {
4510
1
      case Qualifiers::OCL_Strong:
4511
1
        CastExn = CGF.EmitARCRetainNonBlock(CastExn);
4512
1
        LLVM_FALLTHROUGH;
4513
4514
7
      case Qualifiers::OCL_None:
4515
7
      case Qualifiers::OCL_ExplicitNone:
4516
7
      case Qualifiers::OCL_Autoreleasing:
4517
7
        CGF.Builder.CreateStore(CastExn, ParamAddr);
4518
7
        return;
4519
4520
1
      case Qualifiers::OCL_Weak:
4521
1
        CGF.EmitARCInitWeak(ParamAddr, CastExn);
4522
1
        return;
4523
8
      }
4524
0
      llvm_unreachable("bad ownership qualifier!");
4525
0
    }
4526
4527
    // Otherwise, it returns a pointer into the exception object.
4528
4529
31
    llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
4530
31
    llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
4531
4532
31
    LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
4533
31
    LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
4534
31
    switch (TEK) {
4535
0
    case TEK_Complex:
4536
0
      CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
4537
0
                             /*init*/ true);
4538
0
      return;
4539
31
    case TEK_Scalar: {
4540
31
      llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
4541
31
      CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
4542
31
      return;
4543
0
    }
4544
0
    case TEK_Aggregate:
4545
0
      llvm_unreachable("evaluation kind filtered out!");
4546
31
    }
4547
0
    llvm_unreachable("bad evaluation kind");
4548
0
  }
4549
4550
9
  assert(isa<RecordType>(CatchType) && "unexpected catch type!");
4551
0
  auto catchRD = CatchType->getAsCXXRecordDecl();
4552
9
  CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
4553
4554
9
  llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
4555
4556
  // Check for a copy expression.  If we don't have a copy expression,
4557
  // that means a trivial copy is okay.
4558
9
  const Expr *copyExpr = CatchParam.getInit();
4559
9
  if (!copyExpr) {
4560
3
    llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
4561
3
    Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
4562
3
                        LLVMCatchTy, caughtExnAlignment);
4563
3
    LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType);
4564
3
    LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType);
4565
3
    CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap);
4566
3
    return;
4567
3
  }
4568
4569
  // We have to call __cxa_get_exception_ptr to get the adjusted
4570
  // pointer before copying.
4571
6
  llvm::CallInst *rawAdjustedExn =
4572
6
    CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);
4573
4574
  // Cast that to the appropriate type.
4575
6
  Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
4576
6
                      LLVMCatchTy, caughtExnAlignment);
4577
4578
  // The copy expression is defined in terms of an OpaqueValueExpr.
4579
  // Find it and map it to the adjusted expression.
4580
6
  CodeGenFunction::OpaqueValueMapping
4581
6
    opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
4582
6
           CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));
4583
4584
  // Call the copy ctor in a terminate scope.
4585
6
  CGF.EHStack.pushTerminate();
4586
4587
  // Perform the copy construction.
4588
6
  CGF.EmitAggExpr(copyExpr,
4589
6
                  AggValueSlot::forAddr(ParamAddr, Qualifiers(),
4590
6
                                        AggValueSlot::IsNotDestructed,
4591
6
                                        AggValueSlot::DoesNotNeedGCBarriers,
4592
6
                                        AggValueSlot::IsNotAliased,
4593
6
                                        AggValueSlot::DoesNotOverlap));
4594
4595
  // Leave the terminate scope.
4596
6
  CGF.EHStack.popTerminate();
4597
4598
  // Undo the opaque value mapping.
4599
6
  opaque.pop();
4600
4601
  // Finally we can call __cxa_begin_catch.
4602
6
  CallBeginCatch(CGF, Exn, true);
4603
6
}
4604
4605
/// Begins a catch statement by initializing the catch variable and
4606
/// calling __cxa_begin_catch.
4607
void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4608
272
                                   const CXXCatchStmt *S) {
4609
  // We have to be very careful with the ordering of cleanups here:
4610
  //   C++ [except.throw]p4:
4611
  //     The destruction [of the exception temporary] occurs
4612
  //     immediately after the destruction of the object declared in
4613
  //     the exception-declaration in the handler.
4614
  //
4615
  // So the precise ordering is:
4616
  //   1.  Construct catch variable.
4617
  //   2.  __cxa_begin_catch
4618
  //   3.  Enter __cxa_end_catch cleanup
4619
  //   4.  Enter dtor cleanup
4620
  //
4621
  // We do this by using a slightly abnormal initialization process.
4622
  // Delegation sequence:
4623
  //   - ExitCXXTryStmt opens a RunCleanupsScope
4624
  //     - EmitAutoVarAlloca creates the variable and debug info
4625
  //       - InitCatchParam initializes the variable from the exception
4626
  //       - CallBeginCatch calls __cxa_begin_catch
4627
  //       - CallBeginCatch enters the __cxa_end_catch cleanup
4628
  //     - EmitAutoVarCleanups enters the variable destructor cleanup
4629
  //   - EmitCXXTryStmt emits the code for the catch body
4630
  //   - EmitCXXTryStmt close the RunCleanupsScope
4631
4632
272
  VarDecl *CatchParam = S->getExceptionDecl();
4633
272
  if (!CatchParam) {
4634
186
    llvm::Value *Exn = CGF.getExceptionFromSlot();
4635
186
    CallBeginCatch(CGF, Exn, true);
4636
186
    return;
4637
186
  }
4638
4639
  // Emit the local.
4640
86
  CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
4641
86
  InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc());
4642
86
  CGF.EmitAutoVarCleanups(var);
4643
86
}
4644
4645
/// Get or define the following function:
4646
///   void @__clang_call_terminate(i8* %exn) nounwind noreturn
4647
/// This code is used only in C++.
4648
5.00k
static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) {
4649
5.00k
  llvm::FunctionType *fnTy =
4650
5.00k
    llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4651
5.00k
  llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction(
4652
5.00k
      fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);
4653
5.00k
  llvm::Function *fn =
4654
5.00k
      cast<llvm::Function>(fnRef.getCallee()->stripPointerCasts());
4655
5.00k
  if (fn->empty()) {
4656
748
    fn->setDoesNotThrow();
4657
748
    fn->setDoesNotReturn();
4658
4659
    // What we really want is to massively penalize inlining without
4660
    // forbidding it completely.  The difference between that and
4661
    // 'noinline' is negligible.
4662
748
    fn->addFnAttr(llvm::Attribute::NoInline);
4663
4664
    // Allow this function to be shared across translation units, but
4665
    // we don't want it to turn into an exported symbol.
4666
748
    fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
4667
748
    fn->setVisibility(llvm::Function::HiddenVisibility);
4668
748
    if (CGM.supportsCOMDAT())
4669
173
      fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));
4670
4671
    // Set up the function.
4672
748
    llvm::BasicBlock *entry =
4673
748
        llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
4674
748
    CGBuilderTy builder(CGM, entry);
4675
4676
    // Pull the exception pointer out of the parameter list.
4677
748
    llvm::Value *exn = &*fn->arg_begin();
4678
4679
    // Call __cxa_begin_catch(exn).
4680
748
    llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
4681
748
    catchCall->setDoesNotThrow();
4682
748
    catchCall->setCallingConv(CGM.getRuntimeCC());
4683
4684
    // Call std::terminate().
4685
748
    llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
4686
748
    termCall->setDoesNotThrow();
4687
748
    termCall->setDoesNotReturn();
4688
748
    termCall->setCallingConv(CGM.getRuntimeCC());
4689
4690
    // std::terminate cannot return.
4691
748
    builder.CreateUnreachable();
4692
748
  }
4693
5.00k
  return fnRef;
4694
5.00k
}
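The helper the code above materializes is equivalent to roughly this C++ (sketch; the _sketch suffix is mine, the real symbol is __clang_call_terminate):

#include <exception>
extern "C" void *__cxa_begin_catch(void *);   // provided by the C++ ABI runtime

// Emitted linkonce_odr + hidden so every TU can carry a copy without exporting it.
extern "C" void __clang_call_terminate_sketch(void *exn) {
  (void)__cxa_begin_catch(exn);   // mark the in-flight exception as handled
  std::terminate();               // never returns
}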
4695
4696
llvm::CallInst *
4697
ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4698
5.01k
                                                   llvm::Value *Exn) {
4699
  // In C++, we want to call __cxa_begin_catch() before terminating.
4700
5.01k
  if (Exn) {
4701
5.00k
    assert(CGF.CGM.getLangOpts().CPlusPlus);
4702
0
    return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
4703
5.00k
  }
4704
3
  return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
4705
5.01k
}
4706
4707
std::pair<llvm::Value *, const CXXRecordDecl *>
4708
ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
4709
27
                             const CXXRecordDecl *RD) {
4710
27
  return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
4711
27
}
4712
4713
void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4714
0
                                       const CXXCatchStmt *C) {
4715
0
  if (CGF.getTarget().hasFeature("exception-handling"))
4716
0
    CGF.EHStack.pushCleanup<CatchRetScope>(
4717
0
        NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad));
4718
0
  ItaniumCXXABI::emitBeginCatch(CGF, C);
4719
0
}
4720
4721
llvm::CallInst *
4722
WebAssemblyCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4723
0
                                                       llvm::Value *Exn) {
4724
  // The Itanium ABI calls __clang_call_terminate(), which runs __cxa_begin_catch()
4725
  // on the violating exception to mark it handled, but that is currently hard to
4726
  // do with the wasm EH instruction structure (catch/catch_all), so we just call
4727
  // std::terminate and ignore the violating exception, as in CGCXXABI.
4728
  // TODO Consider code transformation that makes calling __clang_call_terminate
4729
  // possible.
4730
0
  return CGCXXABI::emitTerminateForUnexpectedException(CGF, Exn);
4731
0
}
4732
4733
/// Register a global destructor as best as we know how.
4734
void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
4735
                                  llvm::FunctionCallee Dtor,
4736
41
                                  llvm::Constant *Addr) {
4737
41
  if (D.getTLSKind() != VarDecl::TLS_None) {
4738
    // atexit routine expects "int(*)(int,...)"
4739
5
    llvm::FunctionType *FTy =
4740
5
        llvm::FunctionType::get(CGM.IntTy, CGM.IntTy, true);
4741
5
    llvm::PointerType *FpTy = FTy->getPointerTo();
4742
4743
    // extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...);
4744
5
    llvm::FunctionType *AtExitTy =
4745
5
        llvm::FunctionType::get(CGM.IntTy, {CGM.IntTy, FpTy}, true);
4746
4747
    // Fetch the actual function.
4748
5
    llvm::FunctionCallee AtExit =
4749
5
        CGM.CreateRuntimeFunction(AtExitTy, "__pt_atexit_np");
4750
4751
    // Create __dtor function for the var decl.
4752
5
    llvm::Function *DtorStub = CGF.createTLSAtExitStub(D, Dtor, Addr, AtExit);
4753
4754
    // Register above __dtor with atexit().
4755
    // First param is flags and must be 0, second param is function ptr
4756
5
    llvm::Value *NV = llvm::Constant::getNullValue(CGM.IntTy);
4757
5
    CGF.EmitNounwindRuntimeCall(AtExit, {NV, DtorStub});
4758
4759
    // Cannot unregister TLS __dtor so done
4760
5
    return;
4761
5
  }
4762
4763
  // Create __dtor function for the var decl.
4764
36
  llvm::Function *DtorStub = CGF.createAtExitStub(D, Dtor, Addr);
4765
4766
  // Register above __dtor with atexit().
4767
36
  CGF.registerGlobalDtorWithAtExit(DtorStub);
4768
4769
  // Emit __finalize function to unregister __dtor and (as appropriate) call
4770
  // __dtor.
4771
36
  emitCXXStermFinalizer(D, DtorStub, Addr);
4772
36
}
4773
4774
void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
4775
36
                                     llvm::Constant *addr) {
4776
36
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
4777
36
  SmallString<256> FnName;
4778
36
  {
4779
36
    llvm::raw_svector_ostream Out(FnName);
4780
36
    getMangleContext().mangleDynamicStermFinalizer(&D, Out);
4781
36
  }
4782
4783
  // Create the finalization action associated with a variable.
4784
36
  const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
4785
36
  llvm::Function *StermFinalizer = CGM.CreateGlobalInitOrCleanUpFunction(
4786
36
      FTy, FnName.str(), FI, D.getLocation());
4787
4788
36
  CodeGenFunction CGF(CGM);
4789
4790
36
  CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, StermFinalizer, FI,
4791
36
                    FunctionArgList(), D.getLocation(),
4792
36
                    D.getInit()->getExprLoc());
4793
4794
  // The unatexit subroutine unregisters __dtor functions that were previously
4795
  // registered by the atexit subroutine. If the referenced function is found,
4796
  // the unatexit returns a value of 0, meaning that the cleanup is still
4797
  // pending (and we should call the __dtor function).
4798
36
  llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub);
4799
4800
36
  llvm::Value *NeedsDestruct = CGF.Builder.CreateIsNull(V, "needs_destruct");
4801
4802
36
  llvm::BasicBlock *DestructCallBlock = CGF.createBasicBlock("destruct.call");
4803
36
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("destruct.end");
4804
4805
  // Check if unatexit returns a value of 0. If it does, jump to
4806
  // DestructCallBlock, otherwise jump to EndBlock directly.
4807
36
  CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);
4808
4809
36
  CGF.EmitBlock(DestructCallBlock);
4810
4811
  // Emit the call to dtorStub.
4812
36
  llvm::CallInst *CI = CGF.Builder.CreateCall(dtorStub);
4813
4814
  // Make sure the call and the callee agree on calling convention.
4815
36
  CI->setCallingConv(dtorStub->getCallingConv());
4816
4817
36
  CGF.EmitBlock(EndBlock);
4818
4819
36
  CGF.FinishFunction();
4820
4821
36
  if (auto *IPA = D.getAttr<InitPriorityAttr>()) {
4822
8
    CGM.AddCXXPrioritizedStermFinalizerEntry(StermFinalizer,
4823
8
                                             IPA->getPriority());
4824
28
  } else if (isTemplateInstantiation(D.getTemplateSpecializationKind()) ||
4825
28
             getContext().GetGVALinkageForVariable(&D) == GVA_DiscardableODR) {
4826
    // According to C++ [basic.start.init]p2, class template static data
4827
    // members (i.e., implicitly or explicitly instantiated specializations)
4828
    // have unordered initialization. As a consequence, we can put them into
4829
    // their own llvm.global_dtors entry.
4830
6
    CGM.AddCXXStermFinalizerToGlobalDtor(StermFinalizer, 65535);
4831
22
  } else {
4832
22
    CGM.AddCXXStermFinalizerEntry(StermFinalizer);
4833
22
  }
4834
36
}
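The net effect of the finalizer emitted above, written out as a sketch; the names below are hypothetical stand-ins, and the unatexit signature is assumed from the comments in this function:

extern "C" int unatexit(void (*)(void));   // runtime entry named in the comments
                                           // above (assumed signature)
static void dtorStubForD(void) {}          // hypothetical stand-in for the real
                                           // atexit-registered __dtor stub

// The finalizer unregisters the stub; unatexit() returning 0 means the cleanup
// was still pending, so the finalizer performs the destruction itself.
static void stermFinalizerSketch(void) {
  if (unatexit(&dtorStubForD) == 0)        // the "needs_destruct" branch above
    dtorStubForD();
}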