Coverage Report

Created: 2020-10-24 06:27

/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/CodeGen/CGAtomic.cpp
Line    Count    Source
1
//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
//
9
// This file contains the code for emitting atomic operations.
10
//
11
//===----------------------------------------------------------------------===//
12
13
#include "CGCall.h"
14
#include "CGRecordLayout.h"
15
#include "CodeGenFunction.h"
16
#include "CodeGenModule.h"
17
#include "TargetInfo.h"
18
#include "clang/AST/ASTContext.h"
19
#include "clang/CodeGen/CGFunctionInfo.h"
20
#include "clang/Frontend/FrontendDiagnostic.h"
21
#include "llvm/ADT/DenseMap.h"
22
#include "llvm/IR/DataLayout.h"
23
#include "llvm/IR/Intrinsics.h"
24
#include "llvm/IR/Operator.h"
25
26
using namespace clang;
27
using namespace CodeGen;
28
29
namespace {
30
  class AtomicInfo {
31
    CodeGenFunction &CGF;
32
    QualType AtomicTy;
33
    QualType ValueTy;
34
    uint64_t AtomicSizeInBits;
35
    uint64_t ValueSizeInBits;
36
    CharUnits AtomicAlign;
37
    CharUnits ValueAlign;
38
    TypeEvaluationKind EvaluationKind;
39
    bool UseLibcall;
40
    LValue LVal;
41
    CGBitFieldInfo BFI;
42
  public:
43
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
44
        : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
45
1.80k
          EvaluationKind(TEK_Scalar), UseLibcall(true) {
46
1.80k
      assert(!lvalue.isGlobalReg());
47
1.80k
      ASTContext &C = CGF.getContext();
48
1.80k
      if (lvalue.isSimple()) {
49
1.70k
        AtomicTy = lvalue.getType();
50
1.70k
        if (auto *ATy = AtomicTy->getAs<AtomicType>())
51
835
          ValueTy = ATy->getValueType();
52
871
        else
53
871
          ValueTy = AtomicTy;
54
1.70k
        EvaluationKind = CGF.getEvaluationKind(ValueTy);
55
56
1.70k
        uint64_t ValueAlignInBits;
57
1.70k
        uint64_t AtomicAlignInBits;
58
1.70k
        TypeInfo ValueTI = C.getTypeInfo(ValueTy);
59
1.70k
        ValueSizeInBits = ValueTI.Width;
60
1.70k
        ValueAlignInBits = ValueTI.Align;
61
62
1.70k
        TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
63
1.70k
        AtomicSizeInBits = AtomicTI.Width;
64
1.70k
        AtomicAlignInBits = AtomicTI.Align;
65
66
1.70k
        assert(ValueSizeInBits <= AtomicSizeInBits);
67
1.70k
        assert(ValueAlignInBits <= AtomicAlignInBits);
68
69
1.70k
        AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
70
1.70k
        ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
71
1.70k
        if (lvalue.getAlignment().isZero())
72
0
          lvalue.setAlignment(AtomicAlign);
73
74
1.70k
        LVal = lvalue;
75
96
      } else if (lvalue.isBitField()) {
76
80
        ValueTy = lvalue.getType();
77
80
        ValueSizeInBits = C.getTypeSize(ValueTy);
78
80
        auto &OrigBFI = lvalue.getBitFieldInfo();
79
80
        auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
80
80
        AtomicSizeInBits = C.toBits(
81
80
            C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
82
80
                .alignTo(lvalue.getAlignment()));
83
80
        auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldPointer());
84
80
        auto OffsetInChars =
85
80
            (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
86
80
            lvalue.getAlignment();
87
80
        VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
88
80
            VoidPtrAddr, OffsetInChars.getQuantity());
89
80
        auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
90
80
            VoidPtrAddr,
91
80
            CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
92
80
            "atomic_bitfield_base");
93
80
        BFI = OrigBFI;
94
80
        BFI.Offset = Offset;
95
80
        BFI.StorageSize = AtomicSizeInBits;
96
80
        BFI.StorageOffset += OffsetInChars;
97
80
        LVal = LValue::MakeBitfield(Address(Addr, lvalue.getAlignment()),
98
80
                                    BFI, lvalue.getType(), lvalue.getBaseInfo(),
99
80
                                    lvalue.getTBAAInfo());
100
80
        AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
101
80
        if (AtomicTy.isNull()) {
102
8
          llvm::APInt Size(
103
8
              /*numBits=*/32,
104
8
              C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
105
8
          AtomicTy =
106
8
              C.getConstantArrayType(C.CharTy, Size, nullptr, ArrayType::Normal,
107
8
                                     /*IndexTypeQuals=*/0);
108
8
        }
109
80
        AtomicAlign = ValueAlign = lvalue.getAlignment();
110
16
      } else if (lvalue.isVectorElt()) {
111
8
        ValueTy = lvalue.getType()->castAs<VectorType>()->getElementType();
112
8
        ValueSizeInBits = C.getTypeSize(ValueTy);
113
8
        AtomicTy = lvalue.getType();
114
8
        AtomicSizeInBits = C.getTypeSize(AtomicTy);
115
8
        AtomicAlign = ValueAlign = lvalue.getAlignment();
116
8
        LVal = lvalue;
117
8
      } else {
118
8
        assert(lvalue.isExtVectorElt());
119
8
        ValueTy = lvalue.getType();
120
8
        ValueSizeInBits = C.getTypeSize(ValueTy);
121
8
        AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
122
8
            lvalue.getType(), cast<llvm::FixedVectorType>(
123
8
                                  lvalue.getExtVectorAddress().getElementType())
124
8
                                  ->getNumElements());
125
8
        AtomicSizeInBits = C.getTypeSize(AtomicTy);
126
8
        AtomicAlign = ValueAlign = lvalue.getAlignment();
127
8
        LVal = lvalue;
128
8
      }
129
1.80k
      UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
130
1.80k
          AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
131
1.80k
    }
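The bit-field branch above handles atomic updates of bit-field lvalues, which mostly reach this code through OpenMP atomic constructs; the access is widened to an integer covering the storage unit. A minimal C sketch of such input (hypothetical names, assuming compilation with -fopenmp):

struct Flags { unsigned ready : 1; unsigned count : 7; };

void bump(struct Flags *f) {
  /* The bit-field lvalue takes the isBitField() path above: the access is */
  /* rewritten against an iN covering the storage unit of the field.       */
  #pragma omp atomic update
  f->count += 1;
}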
132
133
117
    QualType getAtomicType() const { return AtomicTy; }
134
280
    QualType getValueType() const { return ValueTy; }
135
2.54k
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
136
225
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
137
255
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
138
911
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
139
656
    bool shouldUseLibcall() const { return UseLibcall; }
140
464
    const LValue &getAtomicLValue() const { return LVal; }
141
1.60k
    llvm::Value *getAtomicPointer() const {
142
1.60k
      if (LVal.isSimple())
143
1.25k
        return LVal.getPointer(CGF);
144
352
      else if (LVal.isBitField())
145
304
        return LVal.getBitFieldPointer();
146
48
      else if (LVal.isVectorElt())
147
24
        return LVal.getVectorPointer();
148
24
      assert(LVal.isExtVectorElt());
149
24
      return LVal.getExtVectorPointer();
150
24
    }
151
1.43k
    Address getAtomicAddress() const {
152
1.43k
      return Address(getAtomicPointer(), getAtomicAlignment());
153
1.43k
    }
154
155
595
    Address getAtomicAddressAsAtomicIntPointer() const {
156
595
      return emitCastToAtomicIntPointer(getAtomicAddress());
157
595
    }
158
159
    /// Is the atomic size larger than the underlying value type?
160
    ///
161
    /// Note that the absence of padding does not mean that atomic
162
    /// objects are completely interchangeable with non-atomic
163
    /// objects: we might have promoted the alignment of a type
164
    /// without making it bigger.
165
1.08k
    bool hasPadding() const {
166
1.08k
      return (ValueSizeInBits != AtomicSizeInBits);
167
1.08k
    }
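As a concrete illustration of the padding case, here is a minimal C sketch (hypothetical type name, assuming a typical ABI that rounds the atomic object up to the next power-of-two size):

struct Payload { char bytes[3]; };          /* sizeof == 3 */

void reset(_Atomic struct Payload *p) {
  struct Payload zero = { {0, 0, 0} };
  /* The _Atomic object is commonly padded to 4 bytes, so ValueSizeInBits   */
  /* (24) differs from AtomicSizeInBits (32) and hasPadding() returns true. */
  __c11_atomic_store(p, zero, __ATOMIC_SEQ_CST);
}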
168
169
    bool emitMemSetZeroIfNecessary() const;
170
171
165
    llvm::Value *getAtomicSizeValue() const {
172
165
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
173
165
      return CGF.CGM.getSize(size);
174
165
    }
175
176
    /// Cast the given pointer to an integer pointer suitable for atomic
177
    /// operations.
178
    Address emitCastToAtomicIntPointer(Address Addr) const;
179
180
    /// If Addr is compatible with the iN that will be used for an atomic
181
    /// operation, bitcast it. Otherwise, create a temporary that is suitable
182
    /// and copy the value across.
183
    Address convertToAtomicIntPointer(Address Addr) const;
184
185
    /// Turn an atomic-layout object into an r-value.
186
    RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
187
                                     SourceLocation loc, bool AsValue) const;
188
189
    /// Converts a rvalue to integer value.
190
    llvm::Value *convertRValueToInt(RValue RVal) const;
191
192
    RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
193
                                     AggValueSlot ResultSlot,
194
                                     SourceLocation Loc, bool AsValue) const;
195
196
    /// Copy an atomic r-value into atomic-layout memory.
197
    void emitCopyIntoMemory(RValue rvalue) const;
198
199
    /// Project an l-value down to the value field.
200
166
    LValue projectValue() const {
201
166
      assert(LVal.isSimple());
202
166
      Address addr = getAtomicAddress();
203
166
      if (hasPadding())
204
2
        addr = CGF.Builder.CreateStructGEP(addr, 0);
205
206
166
      return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
207
166
                              LVal.getBaseInfo(), LVal.getTBAAInfo());
208
166
    }
209
210
    /// Emits atomic load.
211
    /// \returns Loaded value.
212
    RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
213
                          bool AsValue, llvm::AtomicOrdering AO,
214
                          bool IsVolatile);
215
216
    /// Emits atomic compare-and-exchange sequence.
217
    /// \param Expected Expected value.
218
    /// \param Desired Desired value.
219
    /// \param Success Atomic ordering for success operation.
220
    /// \param Failure Atomic ordering for failed operation.
221
    /// \param IsWeak true if atomic operation is weak, false otherwise.
222
    /// \returns Pair of values: previous value from storage (value type) and
223
    /// boolean flag (i1 type) with true if success and false otherwise.
224
    std::pair<RValue, llvm::Value *>
225
    EmitAtomicCompareExchange(RValue Expected, RValue Desired,
226
                              llvm::AtomicOrdering Success =
227
                                  llvm::AtomicOrdering::SequentiallyConsistent,
228
                              llvm::AtomicOrdering Failure =
229
                                  llvm::AtomicOrdering::SequentiallyConsistent,
230
                              bool IsWeak = false);
231
232
    /// Emits atomic update.
233
    /// \param AO Atomic ordering.
234
    /// \param UpdateOp Update operation for the current lvalue.
235
    void EmitAtomicUpdate(llvm::AtomicOrdering AO,
236
                          const llvm::function_ref<RValue(RValue)> &UpdateOp,
237
                          bool IsVolatile);
238
    /// Emits atomic update.
239
    /// \param AO Atomic ordering.
240
    void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
241
                          bool IsVolatile);
242
243
    /// Materialize an atomic r-value in atomic-layout memory.
244
    Address materializeRValue(RValue rvalue) const;
245
246
    /// Creates temp alloca for intermediate operations on atomic value.
247
    Address CreateTempAlloca() const;
248
  private:
249
    bool requiresMemSetZero(llvm::Type *type) const;
250
251
252
    /// Emits atomic load as a libcall.
253
    void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
254
                               llvm::AtomicOrdering AO, bool IsVolatile);
255
    /// Emits atomic load as LLVM instruction.
256
    llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
257
    /// Emits atomic compare-and-exchange op as a libcall.
258
    llvm::Value *EmitAtomicCompareExchangeLibcall(
259
        llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
260
        llvm::AtomicOrdering Success =
261
            llvm::AtomicOrdering::SequentiallyConsistent,
262
        llvm::AtomicOrdering Failure =
263
            llvm::AtomicOrdering::SequentiallyConsistent);
264
    /// Emits atomic compare-and-exchange op as LLVM instruction.
265
    std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
266
        llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
267
        llvm::AtomicOrdering Success =
268
            llvm::AtomicOrdering::SequentiallyConsistent,
269
        llvm::AtomicOrdering Failure =
270
            llvm::AtomicOrdering::SequentiallyConsistent,
271
        bool IsWeak = false);
272
    /// Emit atomic update as libcalls.
273
    void
274
    EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
275
                            const llvm::function_ref<RValue(RValue)> &UpdateOp,
276
                            bool IsVolatile);
277
    /// Emit atomic update as LLVM instructions.
278
    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
279
                            const llvm::function_ref<RValue(RValue)> &UpdateOp,
280
                            bool IsVolatile);
281
    /// Emit atomic update as libcalls.
282
    void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
283
                                 bool IsVolatile);
284
    /// Emit atomic update as LLVM instructions.
285
    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRal,
286
                            bool IsVolatile);
287
  };
288
}
289
290
1.10k
Address AtomicInfo::CreateTempAlloca() const {
291
1.10k
  Address TempAlloca = CGF.CreateMemTemp(
292
1.10k
      (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
293
1.05k
                                                                : AtomicTy,
294
1.10k
      getAtomicAlignment(),
295
1.10k
      "atomic-temp");
296
  // Cast to pointer to value type for bitfields.
297
1.10k
  if (LVal.isBitField())
298
132
    return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
299
132
        TempAlloca, getAtomicAddress().getType());
300
976
  return TempAlloca;
301
976
}
302
303
static RValue emitAtomicLibcall(CodeGenFunction &CGF,
304
                                StringRef fnName,
305
                                QualType resultType,
306
348
                                CallArgList &args) {
307
348
  const CGFunctionInfo &fnInfo =
308
348
    CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
309
348
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
310
348
  llvm::AttrBuilder fnAttrB;
311
348
  fnAttrB.addAttribute(llvm::Attribute::NoUnwind);
312
348
  fnAttrB.addAttribute(llvm::Attribute::WillReturn);
313
348
  llvm::AttributeList fnAttrs = llvm::AttributeList::get(
314
348
      CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, fnAttrB);
315
316
348
  llvm::FunctionCallee fn =
317
348
      CGF.CGM.CreateRuntimeFunction(fnTy, fnName, fnAttrs);
318
348
  auto callee = CGCallee::forDirect(fn);
319
348
  return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
320
348
}
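This helper is what ultimately emits calls into the __atomic_* support library. A minimal C sketch of source that typically takes that route (hypothetical names; assumes the 64-byte object is larger than the target's maximum inline atomic width):

struct Big { long words[8]; };               /* 64 bytes */

void snapshot(struct Big *src, struct Big *dst) {
  /* No lock-free support at this size, so this typically lowers to the    */
  /* generic libatomic routine:                                            */
  /*   void __atomic_load(size_t size, void *mem, void *ret, int order);   */
  __atomic_load(src, dst, __ATOMIC_SEQ_CST);
}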
321
322
/// Does a store of the given IR type modify the full expected width?
323
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
324
354
                           uint64_t expectedSize) {
325
354
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
326
354
}
327
328
/// Does the atomic type require memsetting to zero before initialization?
329
///
330
/// The IR type is provided as a way of making certain queries faster.
331
375
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
332
  // If the atomic type has size padding, we definitely need a memset.
333
375
  if (hasPadding()) return true;
334
335
  // Otherwise, do some simple heuristics to try to avoid it:
336
361
  switch (getEvaluationKind()) {
337
  // For scalars and complexes, check whether the store size of the
338
  // type uses the full size.
339
314
  case TEK_Scalar:
340
314
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
341
40
  case TEK_Complex:
342
40
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
343
40
                           AtomicSizeInBits / 2);
344
345
  // Padding in structs has an undefined bit pattern.  User beware.
346
7
  case TEK_Aggregate:
347
7
    return false;
348
0
  }
349
0
  llvm_unreachable("bad evaluation kind");
350
0
}
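A minimal C sketch of the scalar case that fails the full-size check (assuming an x86-64-style target where long double is an 80-bit value stored in a 16-byte atomic object):

void start(_Atomic long double *p) {
  /* The x87 value occupies only 10 of the 16 bytes, so requiresMemSetZero() */
  /* is true and initialization first zeroes the whole object.               */
  __c11_atomic_init(p, 0.0L);
}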
351
352
166
bool AtomicInfo::emitMemSetZeroIfNecessary() const {
353
166
  assert(LVal.isSimple());
354
166
  llvm::Value *addr = LVal.getPointer(CGF);
355
166
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
356
132
    return false;
357
358
34
  CGF.Builder.CreateMemSet(
359
34
      addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
360
34
      CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
361
34
      LVal.getAlignment().getAsAlign());
362
34
  return true;
363
34
}
364
365
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
366
                              Address Dest, Address Ptr,
367
                              Address Val1, Address Val2,
368
                              uint64_t Size,
369
                              llvm::AtomicOrdering SuccessOrder,
370
                              llvm::AtomicOrdering FailureOrder,
371
114
                              llvm::SyncScope::ID Scope) {
372
  // Note that cmpxchg doesn't support weak cmpxchg, at least at the moment.
373
114
  llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
374
114
  llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);
375
376
114
  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
377
114
      Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder,
378
114
      Scope);
379
114
  Pair->setVolatile(E->isVolatile());
380
114
  Pair->setWeak(IsWeak);
381
382
  // Cmp holds the result of the compare-exchange operation: true on success,
383
  // false on failure.
384
114
  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
385
114
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);
386
387
  // This basic block is used to hold the store instruction if the operation
388
  // failed.
389
114
  llvm::BasicBlock *StoreExpectedBB =
390
114
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
391
392
  // This basic block is the exit point of the operation, we should end up
393
  // here regardless of whether or not the operation succeeded.
394
114
  llvm::BasicBlock *ContinueBB =
395
114
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
396
397
  // Update Expected if Expected isn't equal to Old, otherwise branch to the
398
  // exit point.
399
114
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);
400
401
114
  CGF.Builder.SetInsertPoint(StoreExpectedBB);
402
  // Update the memory at Expected with Old's value.
403
114
  CGF.Builder.CreateStore(Old, Val1);
404
  // Finally, branch to the exit point.
405
114
  CGF.Builder.CreateBr(ContinueBB);
406
407
114
  CGF.Builder.SetInsertPoint(ContinueBB);
408
  // Update the memory at Dest with Cmp's value.
409
114
  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
410
114
}
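The emitted control flow mirrors the C-level semantics: on failure, the "expected" object is overwritten with the value actually observed. A minimal C sketch (hypothetical names):

_Bool try_claim(_Atomic int *slot, int *expected, int desired) {
  /* On failure, *expected receives the observed value; that store is the */
  /* cmpxchg.store_expected block emitted above.                          */
  return __c11_atomic_compare_exchange_strong(slot, expected, desired,
                                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}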
411
412
/// Given an ordering required on success, emit all possible cmpxchg
413
/// instructions to cope with the provided (but possibly only dynamically known)
414
/// FailureOrder.
415
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
416
                                        bool IsWeak, Address Dest, Address Ptr,
417
                                        Address Val1, Address Val2,
418
                                        llvm::Value *FailureOrderVal,
419
                                        uint64_t Size,
420
                                        llvm::AtomicOrdering SuccessOrder,
421
80
                                        llvm::SyncScope::ID Scope) {
422
80
  llvm::AtomicOrdering FailureOrder;
423
80
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
424
38
    auto FOS = FO->getSExtValue();
425
38
    if (!llvm::isValidAtomicOrderingCABI(FOS))
426
2
      FailureOrder = llvm::AtomicOrdering::Monotonic;
427
36
    else
428
36
      switch ((llvm::AtomicOrderingCABI)FOS) {
429
7
      case llvm::AtomicOrderingCABI::relaxed:
430
7
      case llvm::AtomicOrderingCABI::release:
431
7
      case llvm::AtomicOrderingCABI::acq_rel:
432
7
        FailureOrder = llvm::AtomicOrdering::Monotonic;
433
7
        break;
434
20
      case llvm::AtomicOrderingCABI::consume:
435
20
      case llvm::AtomicOrderingCABI::acquire:
436
20
        FailureOrder = llvm::AtomicOrdering::Acquire;
437
20
        break;
438
9
      case llvm::AtomicOrderingCABI::seq_cst:
439
9
        FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
440
9
        break;
441
38
      }
442
38
    if (isStrongerThan(FailureOrder, SuccessOrder)) {
443
      // Don't assert on undefined behavior "failure argument shall be no
444
      // stronger than the success argument".
445
0
      FailureOrder =
446
0
          llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
447
0
    }
448
38
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
449
38
                      FailureOrder, Scope);
450
38
    return;
451
38
  }
452
453
  // Create all the relevant BB's
454
42
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
455
42
                   *SeqCstBB = nullptr;
456
42
  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
457
42
  if (SuccessOrder != llvm::AtomicOrdering::Monotonic &&
458
34
      SuccessOrder != llvm::AtomicOrdering::Release)
459
26
    AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
460
42
  if (SuccessOrder == llvm::AtomicOrdering::SequentiallyConsistent)
461
8
    SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
462
463
42
  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);
464
465
42
  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
466
467
  // Emit all the different atomics
468
469
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
470
  // doesn't matter unless someone is crazy enough to use something that
471
  // doesn't fold to a constant for the ordering.
472
42
  CGF.Builder.SetInsertPoint(MonotonicBB);
473
42
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
474
42
                    Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
475
42
  CGF.Builder.CreateBr(ContBB);
476
477
42
  if (AcquireBB) {
478
26
    CGF.Builder.SetInsertPoint(AcquireBB);
479
26
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
480
26
                      Size, SuccessOrder, llvm::AtomicOrdering::Acquire, Scope);
481
26
    CGF.Builder.CreateBr(ContBB);
482
26
    SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
483
26
                AcquireBB);
484
26
    SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
485
26
                AcquireBB);
486
26
  }
487
42
  if (SeqCstBB) {
488
8
    CGF.Builder.SetInsertPoint(SeqCstBB);
489
8
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
490
8
                      llvm::AtomicOrdering::SequentiallyConsistent, Scope);
491
8
    CGF.Builder.CreateBr(ContBB);
492
8
    SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
493
8
                SeqCstBB);
494
8
  }
495
496
42
  CGF.Builder.SetInsertPoint(ContBB);
497
42
}
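The switch above only appears when the failure ordering is not a compile-time constant. A minimal C sketch that produces that situation (hypothetical names):

_Bool cas_any_order(_Atomic int *p, int *expected, int desired, int fail_order) {
  /* fail_order is only known at run time, so one cmpxchg is emitted per */
  /* possible failure ordering and selected via the switch above.        */
  return __c11_atomic_compare_exchange_strong(p, expected, desired,
                                              __ATOMIC_SEQ_CST, fail_order);
}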
498
499
/// Duplicate the atomic min/max operation in conventional IR for the builtin
500
/// variants that return the new rather than the original value.
501
static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
502
                                         AtomicExpr::AtomicOp Op,
503
                                         bool IsSigned,
504
                                         llvm::Value *OldVal,
505
14
                                         llvm::Value *RHS) {
506
14
  llvm::CmpInst::Predicate Pred;
507
14
  switch (Op) {
508
0
  default:
509
0
    llvm_unreachable("Unexpected min/max operation");
510
4
  case AtomicExpr::AO__atomic_max_fetch:
511
2
    Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;
512
4
    break;
513
10
  case AtomicExpr::AO__atomic_min_fetch:
514
6
    Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;
515
10
    break;
516
14
  }
517
14
  llvm::Value *Cmp = Builder.CreateICmp(Pred, OldVal, RHS, "tst");
518
14
  return Builder.CreateSelect(Cmp, OldVal, RHS, "newval");
519
14
}
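The *_fetch forms of min/max return the value after the update, which the atomicrmw instruction does not provide directly; hence the recomputation above. A minimal C sketch (hypothetical names):

unsigned raise_floor(unsigned *level, unsigned floor) {
  /* Returns max(old, floor), i.e. the stored value after the update; the */
  /* helper above recomputes it from the RMW result and the operand.      */
  return __atomic_max_fetch(level, floor, __ATOMIC_RELAXED);
}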
520
521
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
522
                         Address Ptr, Address Val1, Address Val2,
523
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
524
                         uint64_t Size, llvm::AtomicOrdering Order,
525
1.73k
                         llvm::SyncScope::ID Scope) {
526
1.73k
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
527
1.73k
  bool PostOpMinMax = false;
528
1.73k
  unsigned PostOp = 0;
529
530
1.73k
  switch (E->getOp()) {
531
0
  case AtomicExpr::AO__c11_atomic_init:
532
0
  case AtomicExpr::AO__opencl_atomic_init:
533
0
    llvm_unreachable("Already handled!");
534
535
35
  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
536
35
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
537
35
    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
538
35
                                FailureOrder, Size, Order, Scope);
539
35
    return;
540
4
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
541
4
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
542
4
    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
543
4
                                FailureOrder, Size, Order, Scope);
544
4
    return;
545
29
  case AtomicExpr::AO__atomic_compare_exchange:
546
29
  case AtomicExpr::AO__atomic_compare_exchange_n: {
547
29
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
548
17
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
549
17
                                  Val1, Val2, FailureOrder, Size, Order, Scope);
550
12
    } else {
551
      // Create all the relevant BB's
552
12
      llvm::BasicBlock *StrongBB =
553
12
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
554
12
      llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmxchg.weak", CGF.CurFn);
555
12
      llvm::BasicBlock *ContBB =
556
12
          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
557
558
12
      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
559
12
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);
560
561
12
      CGF.Builder.SetInsertPoint(StrongBB);
562
12
      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
563
12
                                  FailureOrder, Size, Order, Scope);
564
12
      CGF.Builder.CreateBr(ContBB);
565
566
12
      CGF.Builder.SetInsertPoint(WeakBB);
567
12
      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
568
12
                                  FailureOrder, Size, Order, Scope);
569
12
      CGF.Builder.CreateBr(ContBB);
570
571
12
      CGF.Builder.SetInsertPoint(ContBB);
572
12
    }
573
29
    return;
574
29
  }
575
373
  case AtomicExpr::AO__c11_atomic_load:
576
373
  case AtomicExpr::AO__opencl_atomic_load:
577
373
  case AtomicExpr::AO__atomic_load_n:
578
373
  case AtomicExpr::AO__atomic_load: {
579
373
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
580
373
    Load->setAtomic(Order, Scope);
581
373
    Load->setVolatile(E->isVolatile());
582
373
    CGF.Builder.CreateStore(Load, Dest);
583
373
    return;
584
373
  }
585
586
344
  case AtomicExpr::AO__c11_atomic_store:
587
344
  case AtomicExpr::AO__opencl_atomic_store:
588
344
  case AtomicExpr::AO__atomic_store:
589
344
  case AtomicExpr::AO__atomic_store_n: {
590
344
    llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
591
344
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
592
344
    Store->setAtomic(Order, Scope);
593
344
    Store->setVolatile(E->isVolatile());
594
344
    return;
595
344
  }
596
597
25
  case AtomicExpr::AO__c11_atomic_exchange:
598
25
  case AtomicExpr::AO__opencl_atomic_exchange:
599
25
  case AtomicExpr::AO__atomic_exchange_n:
600
25
  case AtomicExpr::AO__atomic_exchange:
601
25
    Op = llvm::AtomicRMWInst::Xchg;
602
25
    break;
603
604
60
  case AtomicExpr::AO__atomic_add_fetch:
605
60
    PostOp = llvm::Instruction::Add;
606
60
    LLVM_FALLTHROUGH;
607
471
  case AtomicExpr::AO__c11_atomic_fetch_add:
608
471
  case AtomicExpr::AO__opencl_atomic_fetch_add:
609
471
  case AtomicExpr::AO__atomic_fetch_add:
610
471
    Op = llvm::AtomicRMWInst::Add;
611
471
    break;
612
613
0
  case AtomicExpr::AO__atomic_sub_fetch:
614
0
    PostOp = llvm::Instruction::Sub;
615
0
    LLVM_FALLTHROUGH;
616
383
  case AtomicExpr::AO__c11_atomic_fetch_sub:
617
383
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
618
383
  case AtomicExpr::AO__atomic_fetch_sub:
619
383
    Op = llvm::AtomicRMWInst::Sub;
620
383
    break;
621
622
8
  case AtomicExpr::AO__atomic_min_fetch:
623
8
    PostOpMinMax = true;
624
8
    LLVM_FALLTHROUGH;
625
24
  case AtomicExpr::AO__c11_atomic_fetch_min:
626
24
  case AtomicExpr::AO__opencl_atomic_fetch_min:
627
24
  case AtomicExpr::AO__atomic_fetch_min:
628
13
    Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min
629
11
                                                  : llvm::AtomicRMWInst::UMin;
630
24
    break;
631
632
4
  case AtomicExpr::AO__atomic_max_fetch:
633
4
    PostOpMinMax = true;
634
4
    LLVM_FALLTHROUGH;
635
20
  case AtomicExpr::AO__c11_atomic_fetch_max:
636
20
  case AtomicExpr::AO__opencl_atomic_fetch_max:
637
20
  case AtomicExpr::AO__atomic_fetch_max:
638
11
    Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max
639
9
                                                  : llvm::AtomicRMWInst::UMax;
640
20
    break;
641
642
0
  case AtomicExpr::AO__atomic_and_fetch:
643
0
    PostOp = llvm::Instruction::And;
644
0
    LLVM_FALLTHROUGH;
645
6
  case AtomicExpr::AO__c11_atomic_fetch_and:
646
6
  case AtomicExpr::AO__opencl_atomic_fetch_and:
647
6
  case AtomicExpr::AO__atomic_fetch_and:
648
6
    Op = llvm::AtomicRMWInst::And;
649
6
    break;
650
651
2
  case AtomicExpr::AO__atomic_or_fetch:
652
2
    PostOp = llvm::Instruction::Or;
653
2
    LLVM_FALLTHROUGH;
654
7
  case AtomicExpr::AO__c11_atomic_fetch_or:
655
7
  case AtomicExpr::AO__opencl_atomic_fetch_or:
656
7
  case AtomicExpr::AO__atomic_fetch_or:
657
7
    Op = llvm::AtomicRMWInst::Or;
658
7
    break;
659
660
0
  case AtomicExpr::AO__atomic_xor_fetch:
661
0
    PostOp = llvm::Instruction::Xor;
662
0
    LLVM_FALLTHROUGH;
663
4
  case AtomicExpr::AO__c11_atomic_fetch_xor:
664
4
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
665
4
  case AtomicExpr::AO__atomic_fetch_xor:
666
4
    Op = llvm::AtomicRMWInst::Xor;
667
4
    break;
668
669
4
  case AtomicExpr::AO__atomic_nand_fetch:
670
4
    PostOp = llvm::Instruction::And; // the NOT is special cased below
671
4
    LLVM_FALLTHROUGH;
672
8
  case AtomicExpr::AO__atomic_fetch_nand:
673
8
    Op = llvm::AtomicRMWInst::Nand;
674
8
    break;
675
948
  }
676
677
948
  llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
678
948
  llvm::AtomicRMWInst *RMWI =
679
948
      CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order, Scope);
680
948
  RMWI->setVolatile(E->isVolatile());
681
682
  // For __atomic_*_fetch operations, perform the operation again to
683
  // determine the value which was written.
684
948
  llvm::Value *Result = RMWI;
685
948
  if (PostOpMinMax)
686
12
    Result = EmitPostAtomicMinMax(CGF.Builder, E->getOp(),
687
12
                                  E->getValueType()->isSignedIntegerType(),
688
12
                                  RMWI, LoadVal1);
689
936
  else if (PostOp)
690
66
    Result = CGF.Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp, RMWI,
691
66
                                     LoadVal1);
692
948
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
693
4
    Result = CGF.Builder.CreateNot(Result);
694
948
  CGF.Builder.CreateStore(Result, Dest);
695
948
}
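Nand is the one *_fetch case that also needs the trailing NOT noted above. A minimal C sketch (hypothetical names):

unsigned drop_bits(unsigned *word, unsigned mask) {
  /* Emits an atomicrmw nand, then redoes the AND and applies the NOT so  */
  /* the function can return the new value, ~(old & mask).                */
  return __atomic_nand_fetch(word, mask, __ATOMIC_ACQ_REL);
}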
696
697
// This function emits any expression (scalar, complex, or aggregate)
698
// into a temporary alloca.
699
static Address
700
609
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
701
609
  Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
702
609
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
703
609
                       /*Init*/ true);
704
609
  return DeclPtr;
705
609
}
706
707
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
708
                         Address Ptr, Address Val1, Address Val2,
709
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
710
                         uint64_t Size, llvm::AtomicOrdering Order,
711
1.70k
                         llvm::Value *Scope) {
712
1.70k
  auto ScopeModel = Expr->getScopeModel();
713
714
  // LLVM atomic instructions always have synch scope. If clang atomic
715
  // expression has no scope operand, use default LLVM synch scope.
716
1.70k
  if (!ScopeModel) {
717
1.65k
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
718
1.65k
                 Order, CGF.CGM.getLLVMContext().getOrInsertSyncScopeID(""));
719
1.65k
    return;
720
1.65k
  }
721
722
  // Handle constant scope.
723
58
  if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
724
50
    auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
725
50
        CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),
726
50
        Order, CGF.CGM.getLLVMContext());
727
50
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
728
50
                 Order, SCID);
729
50
    return;
730
50
  }
731
732
  // Handle non-constant scope.
733
8
  auto &Builder = CGF.Builder;
734
8
  auto Scopes = ScopeModel->getRuntimeValues();
735
8
  llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
736
8
  for (auto S : Scopes)
737
32
    BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);
738
739
8
  llvm::BasicBlock *ContBB =
740
8
      CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);
741
742
8
  auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
743
  // If unsupported synch scope is encountered at run time, assume a fallback
744
  // synch scope value.
745
8
  auto FallBack = ScopeModel->getFallBackValue();
746
8
  llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
747
32
  for (auto S : Scopes) {
748
32
    auto *B = BB[S];
749
32
    if (S != FallBack)
750
24
      SI->addCase(Builder.getInt32(S), B);
751
752
32
    Builder.SetInsertPoint(B);
753
32
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
754
32
                 Order,
755
32
                 CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
756
32
                                                         ScopeModel->map(S),
757
32
                                                         Order,
758
32
                                                         CGF.getLLVMContext()));
759
32
    Builder.CreateBr(ContBB);
760
32
  }
761
762
8
  Builder.SetInsertPoint(ContBB);
763
8
}
764
765
static void
766
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
767
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
768
133
                  SourceLocation Loc, CharUnits SizeInChars) {
769
133
  if (UseOptimizedLibcall) {
770
    // Load value and pass it to the function directly.
771
81
    CharUnits Align = CGF.getContext().getTypeAlignInChars(ValTy);
772
81
    int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
773
81
    ValTy =
774
81
        CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
775
81
    llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
776
81
                                                SizeInBits)->getPointerTo();
777
81
    Address Ptr = Address(CGF.Builder.CreateBitCast(Val, IPtrTy), Align);
778
81
    Val = CGF.EmitLoadOfScalar(Ptr, false,
779
81
                               CGF.getContext().getPointerType(ValTy),
780
81
                               Loc);
781
    // Coerce the value into an appropriately sized integer type.
782
81
    Args.add(RValue::get(Val), ValTy);
783
52
  } else {
784
    // Non-optimized functions always take a reference.
785
52
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
786
52
                         CGF.getContext().VoidPtrTy);
787
52
  }
788
133
}
789
790
992
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
791
992
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
792
992
  QualType MemTy = AtomicTy;
793
992
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
794
609
    MemTy = AT->getValueType();
795
992
  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;
796
797
992
  Address Val1 = Address::invalid();
798
992
  Address Val2 = Address::invalid();
799
992
  Address Dest = Address::invalid();
800
992
  Address Ptr = EmitPointerWithAlignment(E->getPtr());
801
802
992
  if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
803
978
      E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
804
16
    LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
805
16
    EmitAtomicInit(E->getVal1(), lvalue);
806
16
    return RValue::get(nullptr);
807
16
  }
808
809
976
  auto TInfo = getContext().getTypeInfoInChars(AtomicTy);
810
976
  uint64_t Size = TInfo.Width.getQuantity();
811
976
  unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
812
813
976
  bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits;
814
976
  bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0;
815
976
  bool UseLibcall = Misaligned | Oversized;
816
976
  CharUnits MaxInlineWidth =
817
976
      getContext().toCharUnitsFromBits(MaxInlineWidthInBits);
818
819
976
  DiagnosticsEngine &Diags = CGM.getDiags();
820
821
976
  if (Misaligned) {
822
80
    Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
823
80
        << (int)TInfo.Width.getQuantity()
824
80
        << (int)Ptr.getAlignment().getQuantity();
825
80
  }
826
827
976
  if (Oversized) {
828
148
    Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_oversized)
829
148
        << (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.getQuantity();
830
148
  }
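A minimal C sketch of an access that trips the misaligned-operation warning and falls back to the library calls (hypothetical names; assumes the packed layout leaves the atomic member at alignment 1):

struct __attribute__((packed)) Rec {
  char tag;
  _Atomic int counter;                      /* 4-byte operation, 1-byte aligned */
};

int read_counter(struct Rec *r) {
  /* Misaligned, so warn_atomic_op_misaligned fires and the load is lowered   */
  /* through the __atomic_* library rather than an inline atomic instruction. */
  return __c11_atomic_load(&r->counter, __ATOMIC_ACQUIRE);
}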
831
832
976
  llvm::Value *Order = EmitScalarExpr(E->getOrder());
833
976
  llvm::Value *Scope =
834
903
      E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;
835
836
976
  switch (E->getOp()) {
837
0
  case AtomicExpr::AO__c11_atomic_init:
838
0
  case AtomicExpr::AO__opencl_atomic_init:
839
0
    llvm_unreachable("Already handled above with EmitAtomicInit!");
840
841
153
  case AtomicExpr::AO__c11_atomic_load:
842
153
  case AtomicExpr::AO__opencl_atomic_load:
843
153
  case AtomicExpr::AO__atomic_load_n:
844
153
    break;
845
846
88
  case AtomicExpr::AO__atomic_load:
847
88
    Dest = EmitPointerWithAlignment(E->getVal1());
848
88
    break;
849
850
84
  case AtomicExpr::AO__atomic_store:
851
84
    Val1 = EmitPointerWithAlignment(E->getVal1());
852
84
    break;
853
854
14
  case AtomicExpr::AO__atomic_exchange:
855
14
    Val1 = EmitPointerWithAlignment(E->getVal1());
856
14
    Dest = EmitPointerWithAlignment(E->getVal2());
857
14
    break;
858
859
69
  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
860
69
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
861
69
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
862
69
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
863
69
  case AtomicExpr::AO__atomic_compare_exchange_n:
864
69
  case AtomicExpr::AO__atomic_compare_exchange:
865
69
    Val1 = EmitPointerWithAlignment(E->getVal1());
866
69
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
867
22
      Val2 = EmitPointerWithAlignment(E->getVal2());
868
47
    else
869
47
      Val2 = EmitValToTemp(*this, E->getVal2());
870
69
    OrderFail = EmitScalarExpr(E->getOrderFail());
871
69
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
872
61
        E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
873
30
      IsWeak = EmitScalarExpr(E->getWeak());
874
69
    break;
875
876
223
  case AtomicExpr::AO__c11_atomic_fetch_add:
877
223
  case AtomicExpr::AO__c11_atomic_fetch_sub:
878
223
  case AtomicExpr::AO__opencl_atomic_fetch_add:
879
223
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
880
223
    if (MemTy->isPointerType()) {
881
      // For pointer arithmetic, we're required to do a bit of math:
882
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
883
      // ... but only for the C11 builtins. The GNU builtins expect the
884
      // user to multiply by sizeof(T).
885
6
      QualType Val1Ty = E->getVal1()->getType();
886
6
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
887
6
      CharUnits PointeeIncAmt =
888
6
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
889
6
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
890
6
      auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
891
6
      Val1 = Temp;
892
6
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
893
6
      break;
894
6
    }
895
217
      LLVM_FALLTHROUGH;
896
562
  case AtomicExpr::AO__atomic_fetch_add:
897
562
  case AtomicExpr::AO__atomic_fetch_sub:
898
562
  case AtomicExpr::AO__atomic_add_fetch:
899
562
  case AtomicExpr::AO__atomic_sub_fetch:
900
562
  case AtomicExpr::AO__c11_atomic_store:
901
562
  case AtomicExpr::AO__c11_atomic_exchange:
902
562
  case AtomicExpr::AO__opencl_atomic_store:
903
562
  case AtomicExpr::AO__opencl_atomic_exchange:
904
562
  case AtomicExpr::AO__atomic_store_n:
905
562
  case AtomicExpr::AO__atomic_exchange_n:
906
562
  case AtomicExpr::AO__c11_atomic_fetch_and:
907
562
  case AtomicExpr::AO__c11_atomic_fetch_or:
908
562
  case AtomicExpr::AO__c11_atomic_fetch_xor:
909
562
  case AtomicExpr::AO__c11_atomic_fetch_max:
910
562
  case AtomicExpr::AO__c11_atomic_fetch_min:
911
562
  case AtomicExpr::AO__opencl_atomic_fetch_and:
912
562
  case AtomicExpr::AO__opencl_atomic_fetch_or:
913
562
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
914
562
  case AtomicExpr::AO__opencl_atomic_fetch_min:
915
562
  case AtomicExpr::AO__opencl_atomic_fetch_max:
916
562
  case AtomicExpr::AO__atomic_fetch_and:
917
562
  case AtomicExpr::AO__atomic_fetch_or:
918
562
  case AtomicExpr::AO__atomic_fetch_xor:
919
562
  case AtomicExpr::AO__atomic_fetch_nand:
920
562
  case AtomicExpr::AO__atomic_and_fetch:
921
562
  case AtomicExpr::AO__atomic_or_fetch:
922
562
  case AtomicExpr::AO__atomic_xor_fetch:
923
562
  case AtomicExpr::AO__atomic_nand_fetch:
924
562
  case AtomicExpr::AO__atomic_max_fetch:
925
562
  case AtomicExpr::AO__atomic_min_fetch:
926
562
  case AtomicExpr::AO__atomic_fetch_max:
927
562
  case AtomicExpr::AO__atomic_fetch_min:
928
562
    Val1 = EmitValToTemp(*this, E->getVal1());
929
562
    break;
930
976
  }
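As an example of the operand gathering above, the compare-exchange builtins supply an expected pointer (Val1), a desired value that is spilled to a temporary (Val2), a failure ordering, and a possibly run-time weak flag. A minimal C sketch (hypothetical names):

_Bool cas_n(int *p, int *expected, int desired, _Bool weak) {
  /* Val1 <- expected, Val2 <- temporary holding desired,                 */
  /* OrderFail <- the failure ordering, IsWeak <- the run-time weak flag. */
  return __atomic_compare_exchange_n(p, expected, desired, weak,
                                     __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
}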
931
932
976
  QualType RValTy = E->getType().getUnqualifiedType();
933
934
  // The inlined atomics only function on iN types, where N is a power of 2. We
935
  // need to make sure (via temporaries if necessary) that all incoming values
936
  // are compatible.
937
976
  LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
938
976
  AtomicInfo Atomics(*this, AtomicVal);
939
940
976
  Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
941
976
  if (Val1.isValid()) Val1 = Atomics.convertToAtomicIntPointer(Val1);
942
976
  if (Val2.isValid()) Val2 = Atomics.convertToAtomicIntPointer(Val2);
943
976
  if (Dest.isValid())
944
102
    Dest = Atomics.emitCastToAtomicIntPointer(Dest);
945
874
  else if (E->isCmpXChg())
946
69
    Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
947
805
  else if (!RValTy->isVoidType())
948
580
    Dest = Atomics.emitCastToAtomicIntPointer(Atomics.CreateTempAlloca());
949
950
  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
951
976
  if (UseLibcall) {
952
183
    bool UseOptimizedLibcall = false;
953
183
    switch (E->getOp()) {
954
0
    case AtomicExpr::AO__c11_atomic_init:
955
0
    case AtomicExpr::AO__opencl_atomic_init:
956
0
      llvm_unreachable("Already handled above with EmitAtomicInit!");
957
958
40
    case AtomicExpr::AO__c11_atomic_fetch_add:
959
40
    case AtomicExpr::AO__opencl_atomic_fetch_add:
960
40
    case AtomicExpr::AO__atomic_fetch_add:
961
40
    case AtomicExpr::AO__c11_atomic_fetch_and:
962
40
    case AtomicExpr::AO__opencl_atomic_fetch_and:
963
40
    case AtomicExpr::AO__atomic_fetch_and:
964
40
    case AtomicExpr::AO__c11_atomic_fetch_or:
965
40
    case AtomicExpr::AO__opencl_atomic_fetch_or:
966
40
    case AtomicExpr::AO__atomic_fetch_or:
967
40
    case AtomicExpr::AO__atomic_fetch_nand:
968
40
    case AtomicExpr::AO__c11_atomic_fetch_sub:
969
40
    case AtomicExpr::AO__opencl_atomic_fetch_sub:
970
40
    case AtomicExpr::AO__atomic_fetch_sub:
971
40
    case AtomicExpr::AO__c11_atomic_fetch_xor:
972
40
    case AtomicExpr::AO__opencl_atomic_fetch_xor:
973
40
    case AtomicExpr::AO__opencl_atomic_fetch_min:
974
40
    case AtomicExpr::AO__opencl_atomic_fetch_max:
975
40
    case AtomicExpr::AO__atomic_fetch_xor:
976
40
    case AtomicExpr::AO__c11_atomic_fetch_max:
977
40
    case AtomicExpr::AO__c11_atomic_fetch_min:
978
40
    case AtomicExpr::AO__atomic_add_fetch:
979
40
    case AtomicExpr::AO__atomic_and_fetch:
980
40
    case AtomicExpr::AO__atomic_nand_fetch:
981
40
    case AtomicExpr::AO__atomic_or_fetch:
982
40
    case AtomicExpr::AO__atomic_sub_fetch:
983
40
    case AtomicExpr::AO__atomic_xor_fetch:
984
40
    case AtomicExpr::AO__atomic_fetch_max:
985
40
    case AtomicExpr::AO__atomic_fetch_min:
986
40
    case AtomicExpr::AO__atomic_max_fetch:
987
40
    case AtomicExpr::AO__atomic_min_fetch:
988
      // For these, only library calls for certain sizes exist.
989
40
      UseOptimizedLibcall = true;
990
40
      break;
991
992
89
    case AtomicExpr::AO__atomic_load:
993
89
    case AtomicExpr::AO__atomic_store:
994
89
    case AtomicExpr::AO__atomic_exchange:
995
89
    case AtomicExpr::AO__atomic_compare_exchange:
996
      // Use the generic version if we don't know that the operand will be
997
      // suitably aligned for the optimized version.
998
89
      if (Misaligned)
999
63
        break;
1000
26
      LLVM_FALLTHROUGH;
1001
80
    case AtomicExpr::AO__c11_atomic_load:
1002
80
    case AtomicExpr::AO__c11_atomic_store:
1003
80
    case AtomicExpr::AO__c11_atomic_exchange:
1004
80
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
1005
80
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
1006
80
    case AtomicExpr::AO__opencl_atomic_load:
1007
80
    case AtomicExpr::AO__opencl_atomic_store:
1008
80
    case AtomicExpr::AO__opencl_atomic_exchange:
1009
80
    case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
1010
80
    case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
1011
80
    case AtomicExpr::AO__atomic_load_n:
1012
80
    case AtomicExpr::AO__atomic_store_n:
1013
80
    case AtomicExpr::AO__atomic_exchange_n:
1014
80
    case AtomicExpr::AO__atomic_compare_exchange_n:
1015
      // Only use optimized library calls for sizes for which they exist.
1016
      // FIXME: Size == 16 optimized library functions exist too.
1017
80
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
1018
63
        UseOptimizedLibcall = true;
1019
80
      break;
1020
183
    }
1021
1022
183
    CallArgList Args;
1023
183
    if (!UseOptimizedLibcall) {
1024
      // For non-optimized library calls, the size is the first parameter
1025
80
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
1026
80
               getContext().getSizeType());
1027
80
    }
1028
    // Atomic address is the first or second parameter
1029
    // The OpenCL atomic library functions only accept pointer arguments to
1030
    // generic address space.
1031
208
    auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
1032
208
      if (!E->isOpenCL())
1033
170
        return V;
1034
38
      auto AS = PT->castAs<PointerType>()->getPointeeType().getAddressSpace();
1035
38
      if (AS == LangAS::opencl_generic)
1036
21
        return V;
1037
17
      auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
1038
17
      auto T = V->getType();
1039
17
      auto *DestType = T->getPointerElementType()->getPointerTo(DestAS);
1040
1041
17
      return getTargetHooks().performAddrSpaceCast(
1042
17
          *this, V, AS, LangAS::opencl_generic, DestType, false);
1043
17
    };
1044
1045
183
    Args.add(RValue::get(CastToGenericAddrSpace(
1046
183
                 EmitCastToVoidPtr(Ptr.getPointer()), E->getPtr()->getType())),
1047
183
             getContext().VoidPtrTy);
1048
1049
183
    std::string LibCallName;
1050
183
    QualType LoweredMemTy =
1051
180
      MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
1052
183
    QualType RetTy;
1053
183
    bool HaveRetTy = false;
1054
183
    llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
1055
183
    bool PostOpMinMax = false;
1056
183
    switch (E->getOp()) {
1057
0
    case AtomicExpr::AO__c11_atomic_init:
1058
0
    case AtomicExpr::AO__opencl_atomic_init:
1059
0
      llvm_unreachable("Already handled!");
1060
1061
    // There is only one libcall for compare and exchange, because there is no
1062
    // optimisation benefit possible from a libcall version of a weak compare
1063
    // and exchange.
1064
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
1065
    //                                void *desired, int success, int failure)
1066
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
1067
    //                                  int success, int failure)
1068
25
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
1069
25
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
1070
25
    case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
1071
25
    case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
1072
25
    case AtomicExpr::AO__atomic_compare_exchange:
1073
25
    case AtomicExpr::AO__atomic_compare_exchange_n:
1074
25
      LibCallName = "__atomic_compare_exchange";
1075
25
      RetTy = getContext().BoolTy;
1076
25
      HaveRetTy = true;
1077
25
      Args.add(
1078
25
          RValue::get(CastToGenericAddrSpace(
1079
25
              EmitCastToVoidPtr(Val1.getPointer()), E->getVal1()->getType())),
1080
25
          getContext().VoidPtrTy);
1081
25
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
1082
25
                        MemTy, E->getExprLoc(), TInfo.Width);
1083
25
      Args.add(RValue::get(Order), getContext().IntTy);
1084
25
      Order = OrderFail;
1085
25
      break;
1086
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
1087
    //                        int order)
1088
    // T __atomic_exchange_N(T *mem, T val, int order)
1089
13
    case AtomicExpr::AO__c11_atomic_exchange:
1090
13
    case AtomicExpr::AO__opencl_atomic_exchange:
1091
13
    case AtomicExpr::AO__atomic_exchange_n:
1092
13
    case AtomicExpr::AO__atomic_exchange:
1093
13
      LibCallName = "__atomic_exchange";
1094
13
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1095
13
                        MemTy, E->getExprLoc(), TInfo.Width);
1096
13
      break;
1097
    // void __atomic_store(size_t size, void *mem, void *val, int order)
1098
    // void __atomic_store_N(T *mem, T val, int order)
1099
55
    case AtomicExpr::AO__c11_atomic_store:
1100
55
    case AtomicExpr::AO__opencl_atomic_store:
1101
55
    case AtomicExpr::AO__atomic_store:
1102
55
    case AtomicExpr::AO__atomic_store_n:
1103
55
      LibCallName = "__atomic_store";
1104
55
      RetTy = getContext().VoidTy;
1105
55
      HaveRetTy = true;
1106
55
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1107
55
                        MemTy, E->getExprLoc(), TInfo.Width);
1108
55
      break;
1109
    // void __atomic_load(size_t size, void *mem, void *return, int order)
1110
    // T __atomic_load_N(T *mem, int order)
1111
50
    case AtomicExpr::AO__c11_atomic_load:
1112
50
    case AtomicExpr::AO__opencl_atomic_load:
1113
50
    case AtomicExpr::AO__atomic_load:
1114
50
    case AtomicExpr::AO__atomic_load_n:
1115
50
      LibCallName = "__atomic_load";
1116
50
      break;
1117
    // T __atomic_add_fetch_N(T *mem, T val, int order)
1118
    // T __atomic_fetch_add_N(T *mem, T val, int order)
1119
1
    case AtomicExpr::AO__atomic_add_fetch:
1120
1
      PostOp = llvm::Instruction::Add;
1121
1
      LLVM_FALLTHROUGH;
1122
17
    case AtomicExpr::AO__c11_atomic_fetch_add:
1123
17
    case AtomicExpr::AO__opencl_atomic_fetch_add:
1124
17
    case AtomicExpr::AO__atomic_fetch_add:
1125
17
      LibCallName = "__atomic_fetch_add";
1126
17
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1127
17
                        LoweredMemTy, E->getExprLoc(), TInfo.Width);
1128
17
      break;
1129
    // T __atomic_and_fetch_N(T *mem, T val, int order)
1130
    // T __atomic_fetch_and_N(T *mem, T val, int order)
1131
1
    case AtomicExpr::AO__atomic_and_fetch:
1132
1
      PostOp = llvm::Instruction::And;
1133
1
      LLVM_FALLTHROUGH;
1134
2
    case AtomicExpr::AO__c11_atomic_fetch_and:
1135
2
    case AtomicExpr::AO__opencl_atomic_fetch_and:
1136
2
    case AtomicExpr::AO__atomic_fetch_and:
1137
2
      LibCallName = "__atomic_fetch_and";
1138
2
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1139
2
                        MemTy, E->getExprLoc(), TInfo.Width);
1140
2
      break;
1141
    // T __atomic_or_fetch_N(T *mem, T val, int order)
1142
    // T __atomic_fetch_or_N(T *mem, T val, int order)
1143
1
    case AtomicExpr::AO__atomic_or_fetch:
1144
1
      PostOp = llvm::Instruction::Or;
1145
1
      LLVM_FALLTHROUGH;
1146
2
    case AtomicExpr::AO__c11_atomic_fetch_or:
1147
2
    case AtomicExpr::AO__opencl_atomic_fetch_or:
1148
2
    case AtomicExpr::AO__atomic_fetch_or:
1149
2
      LibCallName = "__atomic_fetch_or";
1150
2
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1151
2
                        MemTy, E->getExprLoc(), TInfo.Width);
1152
2
      break;
1153
    // T __atomic_sub_fetch_N(T *mem, T val, int order)
1154
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
1155
1
    case AtomicExpr::AO__atomic_sub_fetch:
1156
1
      PostOp = llvm::Instruction::Sub;
1157
1
      LLVM_FALLTHROUGH;
1158
9
    case AtomicExpr::AO__c11_atomic_fetch_sub:
1159
9
    case AtomicExpr::AO__opencl_atomic_fetch_sub:
1160
9
    case AtomicExpr::AO__atomic_fetch_sub:
1161
9
      LibCallName = "__atomic_fetch_sub";
1162
9
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1163
9
                        LoweredMemTy, E->getExprLoc(), TInfo.Width);
1164
9
      break;
1165
    // T __atomic_xor_fetch_N(T *mem, T val, int order)
1166
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
1167
1
    case AtomicExpr::AO__atomic_xor_fetch:
1168
1
      PostOp = llvm::Instruction::Xor;
1169
1
      LLVM_FALLTHROUGH;
1170
2
    case AtomicExpr::AO__c11_atomic_fetch_xor:
1171
2
    case AtomicExpr::AO__opencl_atomic_fetch_xor:
1172
2
    case AtomicExpr::AO__atomic_fetch_xor:
1173
2
      LibCallName = "__atomic_fetch_xor";
1174
2
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1175
2
                        MemTy, E->getExprLoc(), TInfo.Width);
1176
2
      break;
1177
2
    case AtomicExpr::AO__atomic_min_fetch:
1178
2
      PostOpMinMax = true;
1179
2
      LLVM_FALLTHROUGH;
1180
6
    case AtomicExpr::AO__c11_atomic_fetch_min:
1181
6
    case AtomicExpr::AO__atomic_fetch_min:
1182
6
    case AtomicExpr::AO__opencl_atomic_fetch_min:
1183
6
      LibCallName = E->getValueType()->isSignedIntegerType()
1184
2
                        ? "__atomic_fetch_min"
1185
4
                        : "__atomic_fetch_umin";
1186
6
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1187
6
                        LoweredMemTy, E->getExprLoc(), TInfo.Width);
1188
6
      break;
1189
0
    case AtomicExpr::AO__atomic_max_fetch:
1190
0
      PostOpMinMax = true;
1191
0
      LLVM_FALLTHROUGH;
1192
0
    case AtomicExpr::AO__c11_atomic_fetch_max:
1193
0
    case AtomicExpr::AO__atomic_fetch_max:
1194
0
    case AtomicExpr::AO__opencl_atomic_fetch_max:
1195
0
      LibCallName = E->getValueType()->isSignedIntegerType()
1196
0
                        ? "__atomic_fetch_max"
1197
0
                        : "__atomic_fetch_umax";
1198
0
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1199
0
                        LoweredMemTy, E->getExprLoc(), TInfo.Width);
1200
0
      break;
1201
    // T __atomic_nand_fetch_N(T *mem, T val, int order)
1202
    // T __atomic_fetch_nand_N(T *mem, T val, int order)
1203
1
    case AtomicExpr::AO__atomic_nand_fetch:
1204
1
      PostOp = llvm::Instruction::And; // the NOT is special cased below
1205
1
      LLVM_FALLTHROUGH;
1206
2
    case AtomicExpr::AO__atomic_fetch_nand:
1207
2
      LibCallName = "__atomic_fetch_nand";
1208
2
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1209
2
                        MemTy, E->getExprLoc(), TInfo.Width);
1210
2
      break;
1211
183
    }
1212
1213
183
    if (E->isOpenCL()) {
1214
27
      LibCallName = std::string("__opencl") +
1215
27
          StringRef(LibCallName).drop_front(1).str();
1216
1217
27
    }
1218
    // Optimized functions have the size in their name.
1219
183
    if (UseOptimizedLibcall)
1220
103
      LibCallName += "_" + llvm::utostr(Size);
1221
    // By default, assume we return a value of the atomic type.
1222
183
    if (!HaveRetTy) {
1223
103
      if (UseOptimizedLibcall) {
1224
        // Value is returned directly.
1225
        // The function returns an appropriately sized integer type.
1226
63
        RetTy = getContext().getIntTypeForBitwidth(
1227
63
            getContext().toBits(TInfo.Width), /*Signed=*/false);
1228
40
      } else {
1229
        // Value is returned through parameter before the order.
1230
40
        RetTy = getContext().VoidTy;
1231
40
        Args.add(RValue::get(EmitCastToVoidPtr(Dest.getPointer())),
1232
40
                 getContext().VoidPtrTy);
1233
40
      }
1234
103
    }
1235
    // order is always the last parameter
1236
183
    Args.add(RValue::get(Order),
1237
183
             getContext().IntTy);
1238
183
    if (E->isOpenCL())
1239
27
      Args.add(RValue::get(Scope), getContext().IntTy);
1240
1241
    // PostOp is only needed for the atomic_*_fetch operations, and
1242
    // thus is only needed for and implemented in the
1243
    // UseOptimizedLibcall codepath.
1244
183
    assert(UseOptimizedLibcall || (!PostOp && !PostOpMinMax));
1245
1246
183
    RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
1247
    // The value is returned directly from the libcall.
1248
183
    if (E->isCmpXChg())
1249
25
      return Res;
1250
1251
    // The value is returned directly for optimized libcalls but the expr
1252
    // provided an out-param.
1253
158
    if (UseOptimizedLibcall && Res.getScalarVal()) {
1254
63
      llvm::Value *ResVal = Res.getScalarVal();
1255
63
      if (PostOpMinMax) {
1256
2
        llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
1257
2
        ResVal = EmitPostAtomicMinMax(Builder, E->getOp(),
1258
2
                                      E->getValueType()->isSignedIntegerType(),
1259
2
                                      ResVal, LoadVal1);
1260
61
      } else if (PostOp) {
1261
6
        llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
1262
6
        ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
1263
6
      }
1264
63
      if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
1265
1
        ResVal = Builder.CreateNot(ResVal);
1266
1267
63
      Builder.CreateStore(
1268
63
          ResVal,
1269
63
          Builder.CreateBitCast(Dest, ResVal->getType()->getPointerTo()));
1270
63
    }
1271
1272
158
    if (RValTy->isVoidType())
1273
100
      return RValue::get(nullptr);
1274
1275
58
    return convertTempToRValue(
1276
58
        Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
1277
58
        RValTy, E->getExprLoc());
1278
58
  }
1279
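
For orientation, here is a minimal illustrative C++ snippet (not part of the report; the function name bump is invented) of an operation that can reach the libcall path above. When the target cannot inline the atomic and an optimized libcall is usable, the size computed earlier is appended to the name built here, e.g. "__atomic_fetch_add" becomes "__atomic_fetch_add_4" for a 4-byte operand.

    // Illustrative sketch: on most targets this inlines to a single atomicrmw;
    // only when inlining is not possible does it become a libcall.
    #include <cstdint>

    uint32_t bump(uint32_t *p, uint32_t v) {
      // __ATOMIC_SEQ_CST supplies the trailing 'order' argument added above.
      return __atomic_fetch_add(p, v, __ATOMIC_SEQ_CST);
    }
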
1280
793
  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
1281
686
                 E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
1282
676
                 E->getOp() == AtomicExpr::AO__atomic_store ||
1283
627
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
1284
793
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
1285
688
                E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
1286
672
                E->getOp() == AtomicExpr::AO__atomic_load ||
1287
621
                E->getOp() == AtomicExpr::AO__atomic_load_n;
1288
1289
793
  if (isa<llvm::ConstantInt>(Order)) {
1290
481
    auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
1291
    // We should not ever get to a case where the ordering isn't a valid C ABI
1292
    // value, but it's hard to enforce that in general.
1293
481
    if (llvm::isValidAtomicOrderingCABI(ord))
1294
481
      switch ((llvm::AtomicOrderingCABI)ord) {
1295
56
      case llvm::AtomicOrderingCABI::relaxed:
1296
56
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1297
56
                     llvm::AtomicOrdering::Monotonic, Scope);
1298
56
        break;
1299
42
      case llvm::AtomicOrderingCABI::consume:
1300
42
      case llvm::AtomicOrderingCABI::acquire:
1301
42
        if (IsStore)
1302
0
          break; // Avoid crashing on code with undefined behavior
1303
42
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1304
42
                     llvm::AtomicOrdering::Acquire, Scope);
1305
42
        break;
1306
20
      case llvm::AtomicOrderingCABI::release:
1307
20
        if (IsLoad)
1308
0
          break; // Avoid crashing on code with undefined behavior
1309
20
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1310
20
                     llvm::AtomicOrdering::Release, Scope);
1311
20
        break;
1312
51
      case llvm::AtomicOrderingCABI::acq_rel:
1313
51
        if (IsLoad || IsStore)
1314
0
          break; // Avoid crashing on code with undefined behavior
1315
51
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1316
51
                     llvm::AtomicOrdering::AcquireRelease, Scope);
1317
51
        break;
1318
312
      case llvm::AtomicOrderingCABI::seq_cst:
1319
312
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1320
312
                     llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1321
312
        break;
1322
481
      }
1323
481
    if (RValTy->isVoidType())
1324
140
      return RValue::get(nullptr);
1325
1326
341
    return convertTempToRValue(
1327
341
        Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
1328
341
                                        Dest.getAddressSpace())),
1329
341
        RValTy, E->getExprLoc());
1330
341
  }
1331
1332
  // Long case, when Order isn't obviously constant.
1333
1334
  // Create all the relevant BB's
1335
312
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
1336
312
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
1337
312
                   *SeqCstBB = nullptr;
1338
312
  MonotonicBB = createBasicBlock("monotonic", CurFn);
1339
312
  if (!IsStore)
1340
225
    AcquireBB = createBasicBlock("acquire", CurFn);
1341
312
  if (!IsLoad)
1342
233
    ReleaseBB = createBasicBlock("release", CurFn);
1343
312
  if (!IsLoad && !IsStore)
1344
146
    AcqRelBB = createBasicBlock("acqrel", CurFn);
1345
312
  SeqCstBB = createBasicBlock("seqcst", CurFn);
1346
312
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
1347
1348
  // Create the switch for the split
1349
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
1350
  // doesn't matter unless someone is crazy enough to use something that
1351
  // doesn't fold to a constant for the ordering.
1352
312
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
1353
312
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
1354
1355
  // Emit all the different atomics
1356
312
  Builder.SetInsertPoint(MonotonicBB);
1357
312
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1358
312
               llvm::AtomicOrdering::Monotonic, Scope);
1359
312
  Builder.CreateBr(ContBB);
1360
312
  if (!IsStore) {
1361
225
    Builder.SetInsertPoint(AcquireBB);
1362
225
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1363
225
                 llvm::AtomicOrdering::Acquire, Scope);
1364
225
    Builder.CreateBr(ContBB);
1365
225
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
1366
225
                AcquireBB);
1367
225
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
1368
225
                AcquireBB);
1369
225
  }
1370
312
  if (!IsLoad) {
1371
233
    Builder.SetInsertPoint(ReleaseBB);
1372
233
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1373
233
                 llvm::AtomicOrdering::Release, Scope);
1374
233
    Builder.CreateBr(ContBB);
1375
233
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
1376
233
                ReleaseBB);
1377
233
  }
1378
312
  if (!IsLoad && !IsStore) {
1379
146
    Builder.SetInsertPoint(AcqRelBB);
1380
146
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1381
146
                 llvm::AtomicOrdering::AcquireRelease, Scope);
1382
146
    Builder.CreateBr(ContBB);
1383
146
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
1384
146
                AcqRelBB);
1385
146
  }
1386
312
  Builder.SetInsertPoint(SeqCstBB);
1387
312
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1388
312
               llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1389
312
  Builder.CreateBr(ContBB);
1390
312
  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
1391
312
              SeqCstBB);
1392
1393
  // Cleanup and return
1394
312
  Builder.SetInsertPoint(ContBB);
1395
312
  if (RValTy->isVoidType())
1396
87
    return RValue::get(nullptr);
1397
1398
225
  assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
1399
225
  return convertTempToRValue(
1400
225
      Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
1401
225
                                      Dest.getAddressSpace())),
1402
225
      RValTy, E->getExprLoc());
1403
225
}
1404
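
The monotonic/acquire/release/acq_rel/seq_cst switch above is emitted only when the ordering operand is not a compile-time constant. A minimal illustrative snippet (the function name load_with is invented) that produces that shape:

    // Illustrative sketch: 'order' is a runtime value, so CodeGen cannot pick a
    // single llvm::AtomicOrdering and instead switches on the C ABI ordering.
    #include <cstdint>

    uint32_t load_with(uint32_t *p, int order) {
      return __atomic_load_n(p, order);
    }
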
1405
3.48k
Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
1406
3.48k
  unsigned addrspace =
1407
3.48k
    cast<llvm::PointerType>(addr.getPointer()->getType())->getAddressSpace();
1408
3.48k
  llvm::IntegerType *ty =
1409
3.48k
    llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
1410
3.48k
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
1411
3.48k
}
1412
1413
804
Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
1414
804
  llvm::Type *Ty = Addr.getElementType();
1415
804
  uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
1416
804
  if (SourceSizeInBits != AtomicSizeInBits) {
1417
9
    Address Tmp = CreateTempAlloca();
1418
9
    CGF.Builder.CreateMemCpy(Tmp, Addr,
1419
9
                             std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
1420
9
    Addr = Tmp;
1421
9
  }
1422
1423
804
  return emitCastToAtomicIntPointer(Addr);
1424
804
}
1425
1426
RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
1427
                                             AggValueSlot resultSlot,
1428
                                             SourceLocation loc,
1429
157
                                             bool asValue) const {
1430
157
  if (LVal.isSimple()) {
1431
125
    if (EvaluationKind == TEK_Aggregate)
1432
11
      return resultSlot.asRValue();
1433
1434
    // Drill into the padding structure if we have one.
1435
114
    if (hasPadding())
1436
0
      addr = CGF.Builder.CreateStructGEP(addr, 0);
1437
1438
    // Otherwise, just convert the temporary to an r-value using the
1439
    // normal conversion routine.
1440
114
    return CGF.convertTempToRValue(addr, getValueType(), loc);
1441
114
  }
1442
32
  if (!asValue)
1443
    // Get RValue from temp memory as atomic for non-simple lvalues
1444
8
    return RValue::get(CGF.Builder.CreateLoad(addr));
1445
24
  if (LVal.isBitField())
1446
20
    return CGF.EmitLoadOfBitfieldLValue(
1447
20
        LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
1448
20
                             LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1449
4
  if (LVal.isVectorElt())
1450
2
    return CGF.EmitLoadOfLValue(
1451
2
        LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
1452
2
                              LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1453
2
  assert(LVal.isExtVectorElt());
1454
2
  return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
1455
2
      addr, LVal.getExtVectorElts(), LVal.getType(),
1456
2
      LVal.getBaseInfo(), TBAAAccessInfo()));
1457
2
}
1458
1459
RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
1460
                                             AggValueSlot ResultSlot,
1461
                                             SourceLocation Loc,
1462
350
                                             bool AsValue) const {
1463
  // Try not to in some easy cases.
1464
350
  assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
1465
350
  if (getEvaluationKind() == TEK_Scalar &&
1466
340
      (((!LVal.isBitField() ||
1467
48
         LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
1468
292
        !hasPadding()) ||
1469
320
       !AsValue)) {
1470
320
    auto *ValTy = AsValue
1471
103
                      ? CGF.ConvertTypeForMem(ValueTy)
1472
217
                      : getAtomicAddress().getType()->getPointerElementType();
1473
320
    if (ValTy->isIntegerTy()) {
1474
229
      assert(IntVal->getType() == ValTy && "Different integer types.");
1475
229
      return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
1476
91
    } else if (ValTy->isPointerTy())
1477
0
      return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
1478
91
    else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
1479
63
      return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
1480
58
  }
1481
1482
  // Create a temporary.  This needs to be big enough to hold the
1483
  // atomic integer.
1484
58
  Address Temp = Address::invalid();
1485
58
  bool TempIsVolatile = false;
1486
58
  if (AsValue && getEvaluationKind() == TEK_Aggregate) {
1487
8
    assert(!ResultSlot.isIgnored());
1488
8
    Temp = ResultSlot.getAddress();
1489
8
    TempIsVolatile = ResultSlot.isVolatile();
1490
50
  } else {
1491
50
    Temp = CreateTempAlloca();
1492
50
  }
1493
1494
  // Slam the integer into the temporary.
1495
58
  Address CastTemp = emitCastToAtomicIntPointer(Temp);
1496
58
  CGF.Builder.CreateStore(IntVal, CastTemp)
1497
58
      ->setVolatile(TempIsVolatile);
1498
1499
58
  return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
1500
58
}
1501
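
The isBitCastable branch above is what turns the raw integer produced by an atomic instruction back into a non-integer scalar. An illustrative sketch (assuming the target loads the float through a 32-bit integer; _Atomic in C++ is a Clang extension):

    // Illustrative sketch: the atomic load is done on an i32 and the result is
    // bitcast back to float by ConvertIntToValueOrAtomic.
    float read(_Atomic(float) *p) {
      return *p; // implicit sequentially consistent atomic load
    }
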
1502
void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
1503
86
                                       llvm::AtomicOrdering AO, bool) {
1504
  // void __atomic_load(size_t size, void *mem, void *return, int order);
1505
86
  CallArgList Args;
1506
86
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1507
86
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
1508
86
           CGF.getContext().VoidPtrTy);
1509
86
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddForLoaded)),
1510
86
           CGF.getContext().VoidPtrTy);
1511
86
  Args.add(
1512
86
      RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
1513
86
      CGF.getContext().IntTy);
1514
86
  emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
1515
86
}
1516
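
The arguments assembled above follow the prototype quoted in the comment, void __atomic_load(size_t size, void *mem, void *return, int order). An illustrative sketch of a load that is expected to end up in this generic call on typical targets, since a 32-byte object has no lock-free equivalent (the type Big and function load_big are invented):

    // Illustrative sketch: CodeGen passes sizeof(Big), the source address, the
    // destination address, and the ordering to the runtime function.
    struct Big { long v[4]; };

    void load_big(Big *src, Big *dst) {
      __atomic_load(src, dst, __ATOMIC_ACQUIRE);
    }
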
1517
llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
1518
358
                                          bool IsVolatile) {
1519
  // Okay, we're doing this natively.
1520
358
  Address Addr = getAtomicAddressAsAtomicIntPointer();
1521
358
  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
1522
358
  Load->setAtomic(AO);
1523
1524
  // Other decoration.
1525
358
  if (IsVolatile)
1526
12
    Load->setVolatile(true);
1527
358
  CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
1528
358
  return Load;
1529
358
}
1530
1531
/// An LValue is a candidate for having its loads and stores be made atomic if
1532
/// we are operating under /volatile:ms *and* the LValue itself is volatile and
1533
/// performing such an operation can be performed without a libcall.
1534
1.15M
bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
1535
1.15M
  if (!CGM.getCodeGenOpts().MSVolatile) return false;
1536
45
  AtomicInfo AI(*this, LV);
1537
45
  bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
1538
  // An atomic is inline if we don't need to use a libcall.
1539
45
  bool AtomicIsInline = !AI.shouldUseLibcall();
1540
  // MSVC doesn't seem to do this for types wider than a pointer.
1541
45
  if (getContext().getTypeSize(LV.getType()) >
1542
45
      getContext().getTypeSize(getContext().getIntPtrType()))
1543
5
    return false;
1544
40
  return IsVolatile && AtomicIsInline;
1545
40
}
1546
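
An illustrative sketch of the /volatile:ms situation this predicate guards; it applies only when MSVC-compatible volatile semantics are enabled, the access is volatile, and the type is no wider than a pointer (names are invented):

    // Illustrative sketch: under /volatile:ms these volatile accesses are
    // treated as an atomic release store and an atomic acquire load by the
    // EmitAtomicStore/EmitAtomicLoad overloads that follow.
    volatile int flag = 0;

    void publish() { flag = 1; }
    int  consume() { return flag; }
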
1547
RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
1548
69
                                       AggValueSlot Slot) {
1549
69
  llvm::AtomicOrdering AO;
1550
69
  bool IsVolatile = LV.isVolatileQualified();
1551
69
  if (LV.getType()->isAtomicType()) {
1552
62
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
1553
7
  } else {
1554
7
    AO = llvm::AtomicOrdering::Acquire;
1555
7
    IsVolatile = true;
1556
7
  }
1557
69
  return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
1558
69
}
1559
1560
RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
1561
                                  bool AsValue, llvm::AtomicOrdering AO,
1562
175
                                  bool IsVolatile) {
1563
  // Check whether we should use a library call.
1564
175
  if (shouldUseLibcall()) {
1565
42
    Address TempAddr = Address::invalid();
1566
42
    if (LVal.isSimple() && !ResultSlot.isIgnored()) {
1567
3
      assert(getEvaluationKind() == TEK_Aggregate);
1568
3
      TempAddr = ResultSlot.getAddress();
1569
3
    } else
1570
39
      TempAddr = CreateTempAlloca();
1571
1572
42
    EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);
1573
1574
    // Okay, turn that back into the original value or whole atomic (for
1575
    // non-simple lvalues) type.
1576
42
    return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
1577
42
  }
1578
1579
  // Okay, we're doing this natively.
1580
133
  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);
1581
1582
  // If we're ignoring an aggregate return, don't do anything.
1583
133
  if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
1584
0
    return RValue::getAggregate(Address::invalid(), false);
1585
1586
  // Okay, turn that back into the original value or atomic (for non-simple
1587
  // lvalues) type.
1588
133
  return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
1589
133
}
1590
1591
/// Emit a load from an l-value of atomic type.  Note that the r-value
1592
/// we produce is an r-value of the atomic *value* type.
1593
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
1594
                                       llvm::AtomicOrdering AO, bool IsVolatile,
1595
175
                                       AggValueSlot resultSlot) {
1596
175
  AtomicInfo Atomics(*this, src);
1597
175
  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
1598
175
                                IsVolatile);
1599
175
}
1600
1601
/// Copy an r-value into memory as part of storing to an atomic type.
1602
/// This needs to create a bit-pattern suitable for atomic operations.
1603
161
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
1604
161
  assert(LVal.isSimple());
1605
  // If we have an r-value, the rvalue should be of the atomic type,
1606
  // which means that the caller is responsible for having zeroed
1607
  // any padding.  Just do an aggregate copy of that type.
1608
161
  if (rvalue.isAggregate()) {
1609
0
    LValue Dest = CGF.MakeAddrLValue(getAtomicAddress(), getAtomicType());
1610
0
    LValue Src = CGF.MakeAddrLValue(rvalue.getAggregateAddress(),
1611
0
                                    getAtomicType());
1612
0
    bool IsVolatile = rvalue.isVolatileQualified() ||
1613
0
                      LVal.isVolatileQualified();
1614
0
    CGF.EmitAggregateCopy(Dest, Src, getAtomicType(),
1615
0
                          AggValueSlot::DoesNotOverlap, IsVolatile);
1616
0
    return;
1617
0
  }
1618
1619
  // Okay, otherwise we're copying stuff.
1620
1621
  // Zero out the buffer if necessary.
1622
161
  emitMemSetZeroIfNecessary();
1623
1624
  // Drill past the padding if present.
1625
161
  LValue TempLVal = projectValue();
1626
1627
  // Okay, store the rvalue in.
1628
161
  if (rvalue.isScalar()) {
1629
143
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
1630
18
  } else {
1631
18
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
1632
18
  }
1633
161
}
1634
1635
1636
/// Materialize an r-value into memory for the purposes of storing it
1637
/// to an atomic type.
1638
129
Address AtomicInfo::materializeRValue(RValue rvalue) const {
1639
  // Aggregate r-values are already in memory, and EmitAtomicStore
1640
  // requires them to be values of the atomic type.
1641
129
  if (rvalue.isAggregate())
1642
12
    return rvalue.getAggregateAddress();
1643
1644
  // Otherwise, make a temporary and materialize into it.
1645
117
  LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
1646
117
  AtomicInfo Atomics(CGF, TempLV);
1647
117
  Atomics.emitCopyIntoMemory(rvalue);
1648
117
  return TempLV.getAddress(CGF);
1649
117
}
1650
1651
144
llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
1652
  // If we've got a scalar value of the right size, try to avoid going
1653
  // through memory.
1654
144
  if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
1655
133
    llvm::Value *Value = RVal.getScalarVal();
1656
133
    if (isa<llvm::IntegerType>(Value->getType()))
1657
103
      return CGF.EmitToMemory(Value, ValueTy);
1658
30
    else {
1659
30
      llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
1660
30
          CGF.getLLVMContext(),
1661
30
          LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
1662
30
      if (isa<llvm::PointerType>(Value->getType()))
1663
0
        return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
1664
30
      else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
1665
12
        return CGF.Builder.CreateBitCast(Value, InputIntTy);
1666
29
    }
1667
133
  }
1668
  // Otherwise, we need to go through memory.
1669
  // Put the r-value in memory.
1670
29
  Address Addr = materializeRValue(RVal);
1671
1672
  // Cast the temporary to the atomic int type and pull a value out.
1673
29
  Addr = emitCastToAtomicIntPointer(Addr);
1674
29
  return CGF.Builder.CreateLoad(Addr);
1675
29
}
1676
1677
std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
1678
    llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
1679
237
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
1680
  // Do the atomic store.
1681
237
  Address Addr = getAtomicAddressAsAtomicIntPointer();
1682
237
  auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
1683
237
                                               ExpectedVal, DesiredVal,
1684
237
                                               Success, Failure);
1685
  // Other decoration.
1686
237
  Inst->setVolatile(LVal.isVolatileQualified());
1687
237
  Inst->setWeak(IsWeak);
1688
1689
  // Okay, turn that back into the original value type.
1690
237
  auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
1691
237
  auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
1692
237
  return std::make_pair(PreviousVal, SuccessFailureVal);
1693
237
}
1694
1695
llvm::Value *
1696
AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
1697
                                             llvm::Value *DesiredAddr,
1698
                                             llvm::AtomicOrdering Success,
1699
61
                                             llvm::AtomicOrdering Failure) {
1700
  // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
1701
  // void *desired, int success, int failure);
1702
61
  CallArgList Args;
1703
61
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1704
61
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
1705
61
           CGF.getContext().VoidPtrTy);
1706
61
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
1707
61
           CGF.getContext().VoidPtrTy);
1708
61
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
1709
61
           CGF.getContext().VoidPtrTy);
1710
61
  Args.add(RValue::get(
1711
61
               llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
1712
61
           CGF.getContext().IntTy);
1713
61
  Args.add(RValue::get(
1714
61
               llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
1715
61
           CGF.getContext().IntTy);
1716
61
  auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
1717
61
                                              CGF.getContext().BoolTy, Args);
1718
1719
61
  return SuccessFailureRVal.getScalarVal();
1720
61
}
1721
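
The call built above targets the prototype in the comment: bool __atomic_compare_exchange(size_t size, void *obj, void *expected, void *desired, int success, int failure). An illustrative sketch of a builtin use expected to lower to it when the object is too large for a lock-free cmpxchg (the type Big and function cas are invented):

    // Illustrative sketch: on failure 'expected' is updated in place, which is
    // why the expected address is passed to the runtime call above.
    struct Big { long v[4]; };

    bool cas(Big *obj, Big *expected, Big *desired) {
      return __atomic_compare_exchange(obj, expected, desired, /*weak=*/false,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    }
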
1722
std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
1723
    RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
1724
29
    llvm::AtomicOrdering Failure, bool IsWeak) {
1725
29
  if (isStrongerThan(Failure, Success))
1726
    // Don't assert on undefined behavior "failure argument shall be no stronger
1727
    // than the success argument".
1728
0
    Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
1729
1730
  // Check whether we should use a library call.
1731
29
  if (shouldUseLibcall()) {
1732
    // Produce a source address.
1733
17
    Address ExpectedAddr = materializeRValue(Expected);
1734
17
    Address DesiredAddr = materializeRValue(Desired);
1735
17
    auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1736
17
                                                 DesiredAddr.getPointer(),
1737
17
                                                 Success, Failure);
1738
17
    return std::make_pair(
1739
17
        convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
1740
17
                                  SourceLocation(), /*AsValue=*/false),
1741
17
        Res);
1742
17
  }
1743
1744
  // If we've got a scalar value of the right size, try to avoid going
1745
  // through memory.
1746
12
  auto *ExpectedVal = convertRValueToInt(Expected);
1747
12
  auto *DesiredVal = convertRValueToInt(Desired);
1748
12
  auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
1749
12
                                         Failure, IsWeak);
1750
12
  return std::make_pair(
1751
12
      ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
1752
12
                                SourceLocation(), /*AsValue=*/false),
1753
12
      Res.second);
1754
12
}
1755
1756
static void
1757
EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
1758
                      const llvm::function_ref<RValue(RValue)> &UpdateOp,
1759
245
                      Address DesiredAddr) {
1760
245
  RValue UpRVal;
1761
245
  LValue AtomicLVal = Atomics.getAtomicLValue();
1762
245
  LValue DesiredLVal;
1763
245
  if (AtomicLVal.isSimple()) {
1764
197
    UpRVal = OldRVal;
1765
197
    DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
1766
48
  } else {
1767
    // Build new lvalue for temp address.
1768
48
    Address Ptr = Atomics.materializeRValue(OldRVal);
1769
48
    LValue UpdateLVal;
1770
48
    if (AtomicLVal.isBitField()) {
1771
40
      UpdateLVal =
1772
40
          LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
1773
40
                               AtomicLVal.getType(),
1774
40
                               AtomicLVal.getBaseInfo(),
1775
40
                               AtomicLVal.getTBAAInfo());
1776
40
      DesiredLVal =
1777
40
          LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1778
40
                               AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1779
40
                               AtomicLVal.getTBAAInfo());
1780
8
    } else if (AtomicLVal.isVectorElt()) {
1781
4
      UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
1782
4
                                         AtomicLVal.getType(),
1783
4
                                         AtomicLVal.getBaseInfo(),
1784
4
                                         AtomicLVal.getTBAAInfo());
1785
4
      DesiredLVal = LValue::MakeVectorElt(
1786
4
          DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
1787
4
          AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1788
4
    } else {
1789
4
      assert(AtomicLVal.isExtVectorElt());
1790
4
      UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
1791
4
                                            AtomicLVal.getType(),
1792
4
                                            AtomicLVal.getBaseInfo(),
1793
4
                                            AtomicLVal.getTBAAInfo());
1794
4
      DesiredLVal = LValue::MakeExtVectorElt(
1795
4
          DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1796
4
          AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1797
4
    }
1798
48
    UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
1799
48
  }
1800
  // Store new value in the corresponding memory area.
1801
245
  RValue NewRVal = UpdateOp(UpRVal);
1802
245
  if (NewRVal.isScalar()) {
1803
223
    CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
1804
22
  } else {
1805
22
    assert(NewRVal.isComplex());
1806
22
    CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
1807
22
                           /*isInit=*/false);
1808
22
  }
1809
245
}
1810
1811
void AtomicInfo::EmitAtomicUpdateLibcall(
1812
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1813
40
    bool IsVolatile) {
1814
40
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1815
1816
40
  Address ExpectedAddr = CreateTempAlloca();
1817
1818
40
  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
1819
40
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1820
40
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1821
40
  CGF.EmitBlock(ContBB);
1822
40
  Address DesiredAddr = CreateTempAlloca();
1823
40
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1824
32
      requiresMemSetZero(getAtomicAddress().getElementType())) {
1825
8
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1826
8
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
1827
8
  }
1828
40
  auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
1829
40
                                           AggValueSlot::ignored(),
1830
40
                                           SourceLocation(), /*AsValue=*/false);
1831
40
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
1832
40
  auto *Res =
1833
40
      EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1834
40
                                       DesiredAddr.getPointer(),
1835
40
                                       AO, Failure);
1836
40
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1837
40
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1838
40
}
1839
1840
void AtomicInfo::EmitAtomicUpdateOp(
1841
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1842
205
    bool IsVolatile) {
1843
205
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1844
1845
  // Do the atomic load.
1846
205
  auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
1847
  // For non-simple lvalues perform compare-and-swap procedure.
1848
205
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1849
205
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1850
205
  auto *CurBB = CGF.Builder.GetInsertBlock();
1851
205
  CGF.EmitBlock(ContBB);
1852
205
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1853
205
                                             /*NumReservedValues=*/2);
1854
205
  PHI->addIncoming(OldVal, CurBB);
1855
205
  Address NewAtomicAddr = CreateTempAlloca();
1856
205
  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1857
205
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1858
173
      requiresMemSetZero(getAtomicAddress().getElementType())) {
1859
48
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1860
48
  }
1861
205
  auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
1862
205
                                           SourceLocation(), /*AsValue=*/false);
1863
205
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
1864
205
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1865
  // Try to write new value using cmpxchg operation.
1866
205
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1867
205
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1868
205
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1869
205
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1870
205
}
1871
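
EmitAtomicUpdateOp builds a load / PHI / recompute / cmpxchg retry loop. An illustrative C++ rendering of the same control-flow shape, written with std::atomic purely for comparison (the function update is invented; orderings are simplified):

    // Illustrative sketch: 'old' plays the role of the PHI node, and the failed
    // compare_exchange refreshes it before the loop retries.
    #include <atomic>

    void update(std::atomic<int> &a, int (*op)(int)) {
      int old = a.load();
      while (!a.compare_exchange_strong(old, op(old))) {
        // retry with the freshly observed value
      }
    }
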
1872
static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
1873
24
                                  RValue UpdateRVal, Address DesiredAddr) {
1874
24
  LValue AtomicLVal = Atomics.getAtomicLValue();
1875
24
  LValue DesiredLVal;
1876
  // Build new lvalue for temp address.
1877
24
  if (AtomicLVal.isBitField()) {
1878
20
    DesiredLVal =
1879
20
        LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1880
20
                             AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1881
20
                             AtomicLVal.getTBAAInfo());
1882
4
  } else if (AtomicLVal.isVectorElt()) {
1883
2
    DesiredLVal =
1884
2
        LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
1885
2
                              AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1886
2
                              AtomicLVal.getTBAAInfo());
1887
2
  } else {
1888
2
    assert(AtomicLVal.isExtVectorElt());
1889
2
    DesiredLVal = LValue::MakeExtVectorElt(
1890
2
        DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1891
2
        AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1892
2
  }
1893
  // Store new value in the corresponding memory area.
1894
24
  assert(UpdateRVal.isScalar());
1895
24
  CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
1896
24
}
1897
1898
void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
1899
4
                                         RValue UpdateRVal, bool IsVolatile) {
1900
4
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1901
1902
4
  Address ExpectedAddr = CreateTempAlloca();
1903
1904
4
  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
1905
4
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1906
4
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1907
4
  CGF.EmitBlock(ContBB);
1908
4
  Address DesiredAddr = CreateTempAlloca();
1909
4
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1910
4
      requiresMemSetZero(getAtomicAddress().getElementType())) {
1911
4
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1912
4
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
1913
4
  }
1914
4
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
1915
4
  auto *Res =
1916
4
      EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1917
4
                                       DesiredAddr.getPointer(),
1918
4
                                       AO, Failure);
1919
4
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1920
4
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1921
4
}
1922
1923
void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
1924
20
                                    bool IsVolatile) {
1925
20
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1926
1927
  // Do the atomic load.
1928
20
  auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
1929
  // For non-simple lvalues perform compare-and-swap procedure.
1930
20
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1931
20
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1932
20
  auto *CurBB = CGF.Builder.GetInsertBlock();
1933
20
  CGF.EmitBlock(ContBB);
1934
20
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1935
20
                                             /*NumReservedValues=*/2);
1936
20
  PHI->addIncoming(OldVal, CurBB);
1937
20
  Address NewAtomicAddr = CreateTempAlloca();
1938
20
  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1939
20
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1940
20
      requiresMemSetZero(getAtomicAddress().getElementType())) {
1941
20
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1942
20
  }
1943
20
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
1944
20
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1945
  // Try to write new value using cmpxchg operation.
1946
20
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1947
20
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1948
20
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1949
20
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1950
20
}
1951
1952
void AtomicInfo::EmitAtomicUpdate(
1953
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1954
245
    bool IsVolatile) {
1955
245
  if (shouldUseLibcall()) {
1956
40
    EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
1957
205
  } else {
1958
205
    EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
1959
205
  }
1960
245
}
1961
1962
void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
1963
24
                                  bool IsVolatile) {
1964
24
  if (shouldUseLibcall()) {
1965
4
    EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
1966
20
  } else {
1967
20
    EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
1968
20
  }
1969
24
}
1970
1971
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
1972
83
                                      bool isInit) {
1973
83
  bool IsVolatile = lvalue.isVolatileQualified();
1974
83
  llvm::AtomicOrdering AO;
1975
83
  if (lvalue.getType()->isAtomicType()) {
1976
76
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
1977
7
  } else {
1978
7
    AO = llvm::AtomicOrdering::Release;
1979
7
    IsVolatile = true;
1980
7
  }
1981
83
  return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
1982
83
}
1983
1984
/// Emit a store to an l-value of atomic type.
1985
///
1986
/// Note that the r-value is expected to be an r-value *of the atomic
1987
/// type*; this means that for aggregate r-values, it should include
1988
/// storage for any padding that was necessary.
1989
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
1990
                                      llvm::AtomicOrdering AO, bool IsVolatile,
1991
195
                                      bool isInit) {
1992
  // If this is an aggregate r-value, it should agree in type except
1993
  // maybe for address-space qualification.
1994
195
  assert(!rvalue.isAggregate() ||
1995
195
         rvalue.getAggregateAddress().getElementType() ==
1996
195
             dest.getAddress(*this).getElementType());
1997
1998
195
  AtomicInfo atomics(*this, dest);
1999
195
  LValue LVal = atomics.getAtomicLValue();
2000
2001
  // If this is an initialization, just put the value there normally.
2002
195
  if (LVal.isSimple()) {
2003
171
    if (isInit) {
2004
33
      atomics.emitCopyIntoMemory(rvalue);
2005
33
      return;
2006
33
    }
2007
2008
    // Check whether we should use a library call.
2009
138
    if (atomics.shouldUseLibcall()) {
2010
      // Produce a source address.
2011
18
      Address srcAddr = atomics.materializeRValue(rvalue);
2012
2013
      // void __atomic_store(size_t size, void *mem, void *val, int order)
2014
18
      CallArgList args;
2015
18
      args.add(RValue::get(atomics.getAtomicSizeValue()),
2016
18
               getContext().getSizeType());
2017
18
      args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())),
2018
18
               getContext().VoidPtrTy);
2019
18
      args.add(RValue::get(EmitCastToVoidPtr(srcAddr.getPointer())),
2020
18
               getContext().VoidPtrTy);
2021
18
      args.add(
2022
18
          RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
2023
18
          getContext().IntTy);
2024
18
      emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
2025
18
      return;
2026
18
    }
2027
2028
    // Okay, we're doing this natively.
2029
120
    llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
2030
2031
    // Do the atomic store.
2032
120
    Address addr =
2033
120
        atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
2034
120
    intValue = Builder.CreateIntCast(
2035
120
        intValue, addr.getElementType(), /*isSigned=*/false);
2036
120
    llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
2037
2038
120
    if (AO == llvm::AtomicOrdering::Acquire)
2039
0
      AO = llvm::AtomicOrdering::Monotonic;
2040
120
    else if (AO == llvm::AtomicOrdering::AcquireRelease)
2041
0
      AO = llvm::AtomicOrdering::Release;
2042
    // Initializations don't need to be atomic.
2043
120
    if (!isInit)
2044
120
      store->setAtomic(AO);
2045
2046
    // Other decoration.
2047
120
    if (IsVolatile)
2048
10
      store->setVolatile(true);
2049
120
    CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
2050
120
    return;
2051
120
  }
2052
2053
  // Emit simple atomic update operation.
2054
24
  atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
2055
24
}
2056
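
An illustrative sketch of a store that reaches this function through the overload above, which defaults atomic types to sequentially consistent ordering (_Atomic in C++ is a Clang extension; the variable counter is invented):

    // Illustrative sketch: a plain assignment to an _Atomic object; since it is
    // not an initialization, the emitted integer store is marked seq_cst.
    _Atomic(int) counter;

    void reset() {
      counter = 0;
    }
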
2057
/// Emit a compare-and-exchange op for atomic type.
2058
///
2059
std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
2060
    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
2061
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
2062
29
    AggValueSlot Slot) {
2063
  // If this is an aggregate r-value, it should agree in type except
2064
  // maybe for address-space qualification.
2065
29
  assert(!Expected.isAggregate() ||
2066
29
         Expected.getAggregateAddress().getElementType() ==
2067
29
             Obj.getAddress(*this).getElementType());
2068
29
  assert(!Desired.isAggregate() ||
2069
29
         Desired.getAggregateAddress().getElementType() ==
2070
29
             Obj.getAddress(*this).getElementType());
2071
29
  AtomicInfo Atomics(*this, Obj);
2072
2073
29
  return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
2074
29
                                           IsWeak);
2075
29
}
2076
2077
void CodeGenFunction::EmitAtomicUpdate(
2078
    LValue LVal, llvm::AtomicOrdering AO,
2079
245
    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
2080
245
  AtomicInfo Atomics(*this, LVal);
2081
245
  Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
2082
245
}
2083
2084
20
void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
2085
20
  AtomicInfo atomics(*this, dest);
2086
2087
20
  switch (atomics.getEvaluationKind()) {
2088
9
  case TEK_Scalar: {
2089
9
    llvm::Value *value = EmitScalarExpr(init);
2090
9
    atomics.emitCopyIntoMemory(RValue::get(value));
2091
9
    return;
2092
0
  }
2093
2094
2
  case TEK_Complex: {
2095
2
    ComplexPairTy value = EmitComplexExpr(init);
2096
2
    atomics.emitCopyIntoMemory(RValue::getComplex(value));
2097
2
    return;
2098
0
  }
2099
2100
9
  case TEK_Aggregate: {
2101
    // Fix up the destination if the initializer isn't an expression
2102
    // of atomic type.
2103
9
    bool Zeroed = false;
2104
9
    if (!init->getType()->isAtomicType()) {
2105
5
      Zeroed = atomics.emitMemSetZeroIfNecessary();
2106
5
      dest = atomics.projectValue();
2107
5
    }
2108
2109
    // Evaluate the expression directly into the destination.
2110
9
    AggValueSlot slot = AggValueSlot::forLValue(
2111
9
        dest, *this, AggValueSlot::IsNotDestructed,
2112
9
        AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
2113
9
        AggValueSlot::DoesNotOverlap,
2114
7
        Zeroed ? AggValueSlot::IsZeroed : AggValueSlot::IsNotZeroed);
2115
2116
9
    EmitAggExpr(init, slot);
2117
9
    return;
2118
0
  }
2119
0
  }
2120
0
  llvm_unreachable("bad evaluation kind");
2121
0
}
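
Finally, an illustrative sketch of an initialization that takes the TEK_Aggregate branch above: the initializer has the non-atomic value type, so padding up to the atomic width is zeroed first and the aggregate is then evaluated directly into the destination (the type Pair is invented; _Atomic and __c11_atomic_init in C++ are Clang extensions):

    // Illustrative sketch: Pair is assumed to need padding to reach the atomic
    // size on the target, so emitMemSetZeroIfNecessary zeroes the buffer first.
    struct Pair { int a; short b; };

    void init(_Atomic(Pair) *p) {
      __c11_atomic_init(p, Pair{1, 2});
    }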