Coverage Report

Created: 2020-02-25 14:32

/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/CodeGen/CGAtomic.cpp
Line
Count
Source
1
//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
//
9
// This file contains the code for emitting atomic operations.
10
//
11
//===----------------------------------------------------------------------===//
12
13
#include "CGCall.h"
14
#include "CGRecordLayout.h"
15
#include "CodeGenFunction.h"
16
#include "CodeGenModule.h"
17
#include "TargetInfo.h"
18
#include "clang/AST/ASTContext.h"
19
#include "clang/CodeGen/CGFunctionInfo.h"
20
#include "clang/Frontend/FrontendDiagnostic.h"
21
#include "llvm/ADT/DenseMap.h"
22
#include "llvm/IR/DataLayout.h"
23
#include "llvm/IR/Intrinsics.h"
24
#include "llvm/IR/Operator.h"
25
26
using namespace clang;
27
using namespace CodeGen;
28
29
namespace {
30
  class AtomicInfo {
31
    CodeGenFunction &CGF;
32
    QualType AtomicTy;
33
    QualType ValueTy;
34
    uint64_t AtomicSizeInBits;
35
    uint64_t ValueSizeInBits;
36
    CharUnits AtomicAlign;
37
    CharUnits ValueAlign;
38
    TypeEvaluationKind EvaluationKind;
39
    bool UseLibcall;
40
    LValue LVal;
41
    CGBitFieldInfo BFI;
42
  public:
43
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
44
        : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
45
1.78k
          EvaluationKind(TEK_Scalar), UseLibcall(true) {
46
1.78k
      assert(!lvalue.isGlobalReg());
47
1.78k
      ASTContext &C = CGF.getContext();
48
1.78k
      if (lvalue.isSimple()) {
49
1.69k
        AtomicTy = lvalue.getType();
50
1.69k
        if (auto *ATy = AtomicTy->getAs<AtomicType>())
51
839
          ValueTy = ATy->getValueType();
52
852
        else
53
852
          ValueTy = AtomicTy;
54
1.69k
        EvaluationKind = CGF.getEvaluationKind(ValueTy);
55
1.69k
56
1.69k
        uint64_t ValueAlignInBits;
57
1.69k
        uint64_t AtomicAlignInBits;
58
1.69k
        TypeInfo ValueTI = C.getTypeInfo(ValueTy);
59
1.69k
        ValueSizeInBits = ValueTI.Width;
60
1.69k
        ValueAlignInBits = ValueTI.Align;
61
1.69k
62
1.69k
        TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
63
1.69k
        AtomicSizeInBits = AtomicTI.Width;
64
1.69k
        AtomicAlignInBits = AtomicTI.Align;
65
1.69k
66
1.69k
        assert(ValueSizeInBits <= AtomicSizeInBits);
67
1.69k
        assert(ValueAlignInBits <= AtomicAlignInBits);
68
1.69k
69
1.69k
        AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
70
1.69k
        ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
71
1.69k
        if (lvalue.getAlignment().isZero())
72
0
          lvalue.setAlignment(AtomicAlign);
73
1.69k
74
1.69k
        LVal = lvalue;
75
1.69k
      } else if (lvalue.isBitField()) {
76
80
        ValueTy = lvalue.getType();
77
80
        ValueSizeInBits = C.getTypeSize(ValueTy);
78
80
        auto &OrigBFI = lvalue.getBitFieldInfo();
79
80
        auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
80
80
        AtomicSizeInBits = C.toBits(
81
80
            C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
82
80
                .alignTo(lvalue.getAlignment()));
83
80
        auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldPointer());
84
80
        auto OffsetInChars =
85
80
            (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
86
80
            lvalue.getAlignment();
87
80
        VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
88
80
            VoidPtrAddr, OffsetInChars.getQuantity());
89
80
        auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
90
80
            VoidPtrAddr,
91
80
            CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
92
80
            "atomic_bitfield_base");
93
80
        BFI = OrigBFI;
94
80
        BFI.Offset = Offset;
95
80
        BFI.StorageSize = AtomicSizeInBits;
96
80
        BFI.StorageOffset += OffsetInChars;
97
80
        LVal = LValue::MakeBitfield(Address(Addr, lvalue.getAlignment()),
98
80
                                    BFI, lvalue.getType(), lvalue.getBaseInfo(),
99
80
                                    lvalue.getTBAAInfo());
100
80
        AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
101
80
        if (AtomicTy.isNull()) {
102
8
          llvm::APInt Size(
103
8
              /*numBits=*/32,
104
8
              C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
105
8
          AtomicTy =
106
8
              C.getConstantArrayType(C.CharTy, Size, nullptr, ArrayType::Normal,
107
8
                                     /*IndexTypeQuals=*/0);
108
8
        }
109
80
        AtomicAlign = ValueAlign = lvalue.getAlignment();
110
80
      } else if (lvalue.isVectorElt()) {
111
8
        ValueTy = lvalue.getType()->castAs<VectorType>()->getElementType();
112
8
        ValueSizeInBits = C.getTypeSize(ValueTy);
113
8
        AtomicTy = lvalue.getType();
114
8
        AtomicSizeInBits = C.getTypeSize(AtomicTy);
115
8
        AtomicAlign = ValueAlign = lvalue.getAlignment();
116
8
        LVal = lvalue;
117
8
      } else {
118
8
        assert(lvalue.isExtVectorElt());
119
8
        ValueTy = lvalue.getType();
120
8
        ValueSizeInBits = C.getTypeSize(ValueTy);
121
8
        AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
122
8
            lvalue.getType(), lvalue.getExtVectorAddress()
123
8
                                  .getElementType()->getVectorNumElements());
124
8
        AtomicSizeInBits = C.getTypeSize(AtomicTy);
125
8
        AtomicAlign = ValueAlign = lvalue.getAlignment();
126
8
        LVal = lvalue;
127
8
      }
128
1.78k
      UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
129
1.78k
          AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
130
1.78k
    }
131
132
117
    QualType getAtomicType() const { return AtomicTy; }
133
280
    QualType getValueType() const { return ValueTy; }
134
2.43k
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
135
225
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
136
255
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
137
859
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
138
628
    bool shouldUseLibcall() const { return UseLibcall; }
139
436
    const LValue &getAtomicLValue() const { return LVal; }
140
1.49k
    llvm::Value *getAtomicPointer() const {
141
1.49k
      if (LVal.isSimple())
142
1.14k
        return LVal.getPointer(CGF);
143
352
      else if (LVal.isBitField())
144
304
        return LVal.getBitFieldPointer();
145
48
      else if (LVal.isVectorElt())
146
24
        return LVal.getVectorPointer();
147
24
      assert(LVal.isExtVectorElt());
148
24
      return LVal.getExtVectorPointer();
149
24
    }
150
1.33k
    Address getAtomicAddress() const {
151
1.33k
      return Address(getAtomicPointer(), getAtomicAlignment());
152
1.33k
    }
153
154
543
    Address getAtomicAddressAsAtomicIntPointer() const {
155
543
      return emitCastToAtomicIntPointer(getAtomicAddress());
156
543
    }
157
158
    /// Is the atomic size larger than the underlying value type?
159
    ///
160
    /// Note that the absence of padding does not mean that atomic
161
    /// objects are completely interchangeable with non-atomic
162
    /// objects: we might have promoted the alignment of a type
163
    /// without making it bigger.
164
1.02k
    bool hasPadding() const {
165
1.02k
      return (ValueSizeInBits != AtomicSizeInBits);
166
1.02k
    }
167
168
    bool emitMemSetZeroIfNecessary() const;
169
170
164
    llvm::Value *getAtomicSizeValue() const {
171
164
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
172
164
      return CGF.CGM.getSize(size);
173
164
    }
174
175
    /// Cast the given pointer to an integer pointer suitable for atomic
176
    /// operations.
177
    Address emitCastToAtomicIntPointer(Address Addr) const;
178
179
    /// If Addr is compatible with the iN that will be used for an atomic
180
    /// operation, bitcast it. Otherwise, create a temporary that is suitable
181
    /// and copy the value across.
182
    Address convertToAtomicIntPointer(Address Addr) const;
183
184
    /// Turn an atomic-layout object into an r-value.
185
    RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
186
                                     SourceLocation loc, bool AsValue) const;
187
188
    /// Converts a rvalue to integer value.
189
    llvm::Value *convertRValueToInt(RValue RVal) const;
190
191
    RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
192
                                     AggValueSlot ResultSlot,
193
                                     SourceLocation Loc, bool AsValue) const;
194
195
    /// Copy an atomic r-value into atomic-layout memory.
196
    void emitCopyIntoMemory(RValue rvalue) const;
197
198
    /// Project an l-value down to the value field.
199
166
    LValue projectValue() const {
200
166
      assert(LVal.isSimple());
201
166
      Address addr = getAtomicAddress();
202
166
      if (hasPadding())
203
2
        addr = CGF.Builder.CreateStructGEP(addr, 0);
204
166
205
166
      return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
206
166
                              LVal.getBaseInfo(), LVal.getTBAAInfo());
207
166
    }
208
209
    /// Emits atomic load.
210
    /// \returns Loaded value.
211
    RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
212
                          bool AsValue, llvm::AtomicOrdering AO,
213
                          bool IsVolatile);
214
215
    /// Emits atomic compare-and-exchange sequence.
216
    /// \param Expected Expected value.
217
    /// \param Desired Desired value.
218
    /// \param Success Atomic ordering for success operation.
219
    /// \param Failure Atomic ordering for failed operation.
220
    /// \param IsWeak true if atomic operation is weak, false otherwise.
221
    /// \returns Pair of values: previous value from storage (value type) and
222
    /// boolean flag (i1 type) with true if success and false otherwise.
223
    std::pair<RValue, llvm::Value *>
224
    EmitAtomicCompareExchange(RValue Expected, RValue Desired,
225
                              llvm::AtomicOrdering Success =
226
                                  llvm::AtomicOrdering::SequentiallyConsistent,
227
                              llvm::AtomicOrdering Failure =
228
                                  llvm::AtomicOrdering::SequentiallyConsistent,
229
                              bool IsWeak = false);
230
231
    /// Emits atomic update.
232
    /// \param AO Atomic ordering.
233
    /// \param UpdateOp Update operation for the current lvalue.
234
    void EmitAtomicUpdate(llvm::AtomicOrdering AO,
235
                          const llvm::function_ref<RValue(RValue)> &UpdateOp,
236
                          bool IsVolatile);
237
    /// Emits atomic update.
238
    /// \param AO Atomic ordering.
239
    void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
240
                          bool IsVolatile);
241
242
    /// Materialize an atomic r-value in atomic-layout memory.
243
    Address materializeRValue(RValue rvalue) const;
244
245
    /// Creates temp alloca for intermediate operations on atomic value.
246
    Address CreateTempAlloca() const;
247
  private:
248
    bool requiresMemSetZero(llvm::Type *type) const;
249
250
251
    /// Emits atomic load as a libcall.
252
    void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
253
                               llvm::AtomicOrdering AO, bool IsVolatile);
254
    /// Emits atomic load as LLVM instruction.
255
    llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
256
    /// Emits atomic compare-and-exchange op as a libcall.
257
    llvm::Value *EmitAtomicCompareExchangeLibcall(
258
        llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
259
        llvm::AtomicOrdering Success =
260
            llvm::AtomicOrdering::SequentiallyConsistent,
261
        llvm::AtomicOrdering Failure =
262
            llvm::AtomicOrdering::SequentiallyConsistent);
263
    /// Emits atomic compare-and-exchange op as LLVM instruction.
264
    std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
265
        llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
266
        llvm::AtomicOrdering Success =
267
            llvm::AtomicOrdering::SequentiallyConsistent,
268
        llvm::AtomicOrdering Failure =
269
            llvm::AtomicOrdering::SequentiallyConsistent,
270
        bool IsWeak = false);
271
    /// Emit atomic update as libcalls.
272
    void
273
    EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
274
                            const llvm::function_ref<RValue(RValue)> &UpdateOp,
275
                            bool IsVolatile);
276
    /// Emit atomic update as LLVM instructions.
277
    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
278
                            const llvm::function_ref<RValue(RValue)> &UpdateOp,
279
                            bool IsVolatile);
280
    /// Emit atomic update as libcalls.
281
    void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
282
                                 bool IsVolatile);
283
    /// Emit atomic update as LLVM instructions.
284
    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRal,
285
                            bool IsVolatile);
286
  };
287
}
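The AtomicInfo helper above normalizes an arbitrary atomic l-value (simple, bit-field, vector element) and records whether the access can be emitted inline or must go through the libatomic runtime (UseLibcall). A hedged, standalone illustration of the two outcomes, not part of CGAtomic.cpp; the function names are invented and the exact lock-free threshold depends on the target:

// Compiled with clang, the 8-byte exchange is normally lock-free on 64-bit
// targets and takes the inline path, while the 32-byte struct usually exceeds
// the target's maximum inline atomic width and is lowered to a call to the
// size-generic __atomic_exchange libcall instead.
struct Big { long v[4]; };

long exchange_small(long *p, long v) {
  return __atomic_exchange_n(p, v, __ATOMIC_SEQ_CST);
}

void exchange_big(Big *p, Big *val, Big *ret) {
  __atomic_exchange(p, val, ret, __ATOMIC_SEQ_CST);
}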
288
289
1.09k
Address AtomicInfo::CreateTempAlloca() const {
290
1.09k
  Address TempAlloca = CGF.CreateMemTemp(
291
1.09k
      (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
292
1.09k
                                                                : AtomicTy,
293
1.09k
      getAtomicAlignment(),
294
1.09k
      "atomic-temp");
295
1.09k
  // Cast to pointer to value type for bitfields.
296
1.09k
  if (LVal.isBitField())
297
132
    return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
298
132
        TempAlloca, getAtomicAddress().getType());
299
965
  return TempAlloca;
300
965
}
301
302
static RValue emitAtomicLibcall(CodeGenFunction &CGF,
303
                                StringRef fnName,
304
                                QualType resultType,
305
345
                                CallArgList &args) {
306
345
  const CGFunctionInfo &fnInfo =
307
345
    CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
308
345
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
309
345
  llvm::FunctionCallee fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
310
345
  auto callee = CGCallee::forDirect(fn);
311
345
  return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
312
345
}
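emitAtomicLibcall builds and emits the call to one of the libatomic entry points. For orientation, the size-generic forms have C prototypes along the following lines; this is an assumption taken from the prototype comments further down in this file and from the GCC atomic library design that EmitAtomicExpr links to, not something this function declares itself:

#include <cstddef>

extern "C" void __atomic_load(std::size_t size, void *mem, void *ret, int order);
extern "C" void __atomic_store(std::size_t size, void *mem, void *val, int order);
extern "C" bool __atomic_compare_exchange(std::size_t size, void *mem, void *expected,
                                          void *desired, int success, int failure);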
313
314
/// Does a store of the given IR type modify the full expected width?
315
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
316
328
                           uint64_t expectedSize) {
317
328
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
318
328
}
319
320
/// Does the atomic type require memsetting to zero before initialization?
321
///
322
/// The IR type is provided as a way of making certain queries faster.
323
349
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
324
349
  // If the atomic type has size padding, we definitely need a memset.
325
349
  if (hasPadding()) return true;
326
335
327
335
  // Otherwise, do some simple heuristics to try to avoid it:
328
335
  switch (getEvaluationKind()) {
329
0
  // For scalars and complexes, check whether the store size of the
330
0
  // type uses the full size.
331
288
  case TEK_Scalar:
332
288
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
333
40
  case TEK_Complex:
334
40
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
335
40
                           AtomicSizeInBits / 2);
336
0
337
0
  // Padding in structs has an undefined bit pattern.  User beware.
338
7
  case TEK_Aggregate:
339
7
    return false;
340
0
  }
341
0
  llvm_unreachable("bad evaluation kind");
342
0
}
343
344
166
bool AtomicInfo::emitMemSetZeroIfNecessary() const {
345
166
  assert(LVal.isSimple());
346
166
  llvm::Value *addr = LVal.getPointer(CGF);
347
166
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
348
132
    return false;
349
34
350
34
  CGF.Builder.CreateMemSet(
351
34
      addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
352
34
      CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
353
34
      LVal.getAlignment().getAsAlign());
354
34
  return true;
355
34
}
356
357
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
358
                              Address Dest, Address Ptr,
359
                              Address Val1, Address Val2,
360
                              uint64_t Size,
361
                              llvm::AtomicOrdering SuccessOrder,
362
                              llvm::AtomicOrdering FailureOrder,
363
114
                              llvm::SyncScope::ID Scope) {
364
114
  // Note that cmpxchg doesn't support weak cmpxchg, at least at the moment.
365
114
  llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
366
114
  llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);
367
114
368
114
  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
369
114
      Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder,
370
114
      Scope);
371
114
  Pair->setVolatile(E->isVolatile());
372
114
  Pair->setWeak(IsWeak);
373
114
374
114
  // Cmp holds the result of the compare-exchange operation: true on success,
375
114
  // false on failure.
376
114
  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
377
114
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);
378
114
379
114
  // This basic block is used to hold the store instruction if the operation
380
114
  // failed.
381
114
  llvm::BasicBlock *StoreExpectedBB =
382
114
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
383
114
384
114
  // This basic block is the exit point of the operation, we should end up
385
114
  // here regardless of whether or not the operation succeeded.
386
114
  llvm::BasicBlock *ContinueBB =
387
114
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
388
114
389
114
  // Update Expected if Expected isn't equal to Old, otherwise branch to the
390
114
  // exit point.
391
114
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);
392
114
393
114
  CGF.Builder.SetInsertPoint(StoreExpectedBB);
394
114
  // Update the memory at Expected with Old's value.
395
114
  CGF.Builder.CreateStore(Old, Val1);
396
114
  // Finally, branch to the exit point.
397
114
  CGF.Builder.CreateBr(ContinueBB);
398
114
399
114
  CGF.Builder.SetInsertPoint(ContinueBB);
400
114
  // Update the memory at Dest with Cmp's value.
401
114
  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
402
114
}
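A hedged sketch of source that reaches emitAtomicCmpXchg; the function name is invented. On failure, the value observed by the cmpxchg is written back through 'expected' (the cmpxchg.store_expected block above), and the i1 success flag is stored to the result slot:

bool try_update(int *obj, int *expected, int desired) {
  return __atomic_compare_exchange_n(obj, expected, desired, /*weak=*/false,
                                     __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}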
403
404
/// Given an ordering required on success, emit all possible cmpxchg
405
/// instructions to cope with the provided (but possibly only dynamically known)
406
/// FailureOrder.
407
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
408
                                        bool IsWeak, Address Dest, Address Ptr,
409
                                        Address Val1, Address Val2,
410
                                        llvm::Value *FailureOrderVal,
411
                                        uint64_t Size,
412
                                        llvm::AtomicOrdering SuccessOrder,
413
80
                                        llvm::SyncScope::ID Scope) {
414
80
  llvm::AtomicOrdering FailureOrder;
415
80
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
416
38
    auto FOS = FO->getSExtValue();
417
38
    if (!llvm::isValidAtomicOrderingCABI(FOS))
418
2
      FailureOrder = llvm::AtomicOrdering::Monotonic;
419
36
    else
420
36
      switch ((llvm::AtomicOrderingCABI)FOS) {
421
7
      case llvm::AtomicOrderingCABI::relaxed:
422
7
      case llvm::AtomicOrderingCABI::release:
423
7
      case llvm::AtomicOrderingCABI::acq_rel:
424
7
        FailureOrder = llvm::AtomicOrdering::Monotonic;
425
7
        break;
426
20
      case llvm::AtomicOrderingCABI::consume:
427
20
      case llvm::AtomicOrderingCABI::acquire:
428
20
        FailureOrder = llvm::AtomicOrdering::Acquire;
429
20
        break;
430
20
      case llvm::AtomicOrderingCABI::seq_cst:
431
9
        FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
432
9
        break;
433
38
      }
434
38
    if (isStrongerThan(FailureOrder, SuccessOrder)) {
435
0
      // Don't assert on undefined behavior "failure argument shall be no
436
0
      // stronger than the success argument".
437
0
      FailureOrder =
438
0
          llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
439
0
    }
440
38
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
441
38
                      FailureOrder, Scope);
442
38
    return;
443
38
  }
444
42
445
42
  // Create all the relevant BB's
446
42
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
447
42
                   *SeqCstBB = nullptr;
448
42
  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
449
42
  if (SuccessOrder != llvm::AtomicOrdering::Monotonic &&
450
42
      SuccessOrder != llvm::AtomicOrdering::Release)
451
26
    AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
452
42
  if (SuccessOrder == llvm::AtomicOrdering::SequentiallyConsistent)
453
8
    SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
454
42
455
42
  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);
456
42
457
42
  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
458
42
459
42
  // Emit all the different atomics
460
42
461
42
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
462
42
  // doesn't matter unless someone is crazy enough to use something that
463
42
  // doesn't fold to a constant for the ordering.
464
42
  CGF.Builder.SetInsertPoint(MonotonicBB);
465
42
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
466
42
                    Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
467
42
  CGF.Builder.CreateBr(ContBB);
468
42
469
42
  if (AcquireBB) {
470
26
    CGF.Builder.SetInsertPoint(AcquireBB);
471
26
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
472
26
                      Size, SuccessOrder, llvm::AtomicOrdering::Acquire, Scope);
473
26
    CGF.Builder.CreateBr(ContBB);
474
26
    SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
475
26
                AcquireBB);
476
26
    SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
477
26
                AcquireBB);
478
26
  }
479
42
  if (SeqCstBB) {
480
8
    CGF.Builder.SetInsertPoint(SeqCstBB);
481
8
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
482
8
                      llvm::AtomicOrdering::SequentiallyConsistent, Scope);
483
8
    CGF.Builder.CreateBr(ContBB);
484
8
    SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
485
8
                SeqCstBB);
486
8
  }
487
42
488
42
  CGF.Builder.SetInsertPoint(ContBB);
489
42
}
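When the failure ordering is not a compile-time constant, the function above cannot pick a single cmpxchg and instead emits the switch over monotonic, acquire and seq_cst blocks shown here. A hedged example that would take that path (the function name is invented):

bool try_update_dyn(int *obj, int *expected, int desired, int failure_order) {
  return __atomic_compare_exchange_n(obj, expected, desired, /*weak=*/false,
                                     __ATOMIC_SEQ_CST, failure_order);
}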
490
491
/// Duplicate the atomic min/max operation in conventional IR for the builtin
492
/// variants that return the new rather than the original value.
493
static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
494
                                         AtomicExpr::AtomicOp Op,
495
                                         bool IsSigned,
496
                                         llvm::Value *OldVal,
497
14
                                         llvm::Value *RHS) {
498
14
  llvm::CmpInst::Predicate Pred;
499
14
  switch (Op) {
500
0
  default:
501
0
    llvm_unreachable("Unexpected min/max operation");
502
4
  case AtomicExpr::AO__atomic_max_fetch:
503
4
    Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;
504
4
    break;
505
10
  case AtomicExpr::AO__atomic_min_fetch:
506
10
    Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;
507
10
    break;
508
14
  }
509
14
  llvm::Value *Cmp = Builder.CreateICmp(Pred, OldVal, RHS, "tst");
510
14
  return Builder.CreateSelect(Cmp, OldVal, RHS, "newval");
511
14
}
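The *_fetch min/max builtins return the new value, while the underlying atomicrmw returns the old one, so the minimum or maximum is recomputed afterwards with the icmp/select pair emitted above. A hedged example (the builtin is a compiler extension; the function name is invented):

unsigned min_fetch(unsigned *p, unsigned v) {
  return __atomic_min_fetch(p, v, __ATOMIC_RELAXED);
}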
512
513
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
514
                         Address Ptr, Address Val1, Address Val2,
515
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
516
                         uint64_t Size, llvm::AtomicOrdering Order,
517
1.74k
                         llvm::SyncScope::ID Scope) {
518
1.74k
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
519
1.74k
  bool PostOpMinMax = false;
520
1.74k
  unsigned PostOp = 0;
521
1.74k
522
1.74k
  switch (E->getOp()) {
523
0
  case AtomicExpr::AO__c11_atomic_init:
524
0
  case AtomicExpr::AO__opencl_atomic_init:
525
0
    llvm_unreachable("Already handled!");
526
0
527
35
  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
528
35
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
529
35
    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
530
35
                                FailureOrder, Size, Order, Scope);
531
35
    return;
532
35
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
533
4
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
534
4
    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
535
4
                                FailureOrder, Size, Order, Scope);
536
4
    return;
537
29
  case AtomicExpr::AO__atomic_compare_exchange:
538
29
  case AtomicExpr::AO__atomic_compare_exchange_n: {
539
29
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
540
17
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
541
17
                                  Val1, Val2, FailureOrder, Size, Order, Scope);
542
17
    } else {
543
12
      // Create all the relevant BB's
544
12
      llvm::BasicBlock *StrongBB =
545
12
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
546
12
      llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmxchg.weak", CGF.CurFn);
547
12
      llvm::BasicBlock *ContBB =
548
12
          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
549
12
550
12
      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
551
12
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);
552
12
553
12
      CGF.Builder.SetInsertPoint(StrongBB);
554
12
      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
555
12
                                  FailureOrder, Size, Order, Scope);
556
12
      CGF.Builder.CreateBr(ContBB);
557
12
558
12
      CGF.Builder.SetInsertPoint(WeakBB);
559
12
      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
560
12
                                  FailureOrder, Size, Order, Scope);
561
12
      CGF.Builder.CreateBr(ContBB);
562
12
563
12
      CGF.Builder.SetInsertPoint(ContBB);
564
12
    }
565
29
    return;
566
29
  }
567
373
  case AtomicExpr::AO__c11_atomic_load:
568
373
  case AtomicExpr::AO__opencl_atomic_load:
569
373
  case AtomicExpr::AO__atomic_load_n:
570
373
  case AtomicExpr::AO__atomic_load: {
571
373
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
572
373
    Load->setAtomic(Order, Scope);
573
373
    Load->setVolatile(E->isVolatile());
574
373
    CGF.Builder.CreateStore(Load, Dest);
575
373
    return;
576
373
  }
577
373
578
373
  case AtomicExpr::AO__c11_atomic_store:
579
344
  case AtomicExpr::AO__opencl_atomic_store:
580
344
  case AtomicExpr::AO__atomic_store:
581
344
  case AtomicExpr::AO__atomic_store_n: {
582
344
    llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
583
344
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
584
344
    Store->setAtomic(Order, Scope);
585
344
    Store->setVolatile(E->isVolatile());
586
344
    return;
587
344
  }
588
344
589
344
  case AtomicExpr::AO__c11_atomic_exchange:
590
25
  case AtomicExpr::AO__opencl_atomic_exchange:
591
25
  case AtomicExpr::AO__atomic_exchange_n:
592
25
  case AtomicExpr::AO__atomic_exchange:
593
25
    Op = llvm::AtomicRMWInst::Xchg;
594
25
    break;
595
25
596
69
  case AtomicExpr::AO__atomic_add_fetch:
597
69
    PostOp = llvm::Instruction::Add;
598
69
    LLVM_FALLTHROUGH;
599
486
  case AtomicExpr::AO__c11_atomic_fetch_add:
600
486
  case AtomicExpr::AO__opencl_atomic_fetch_add:
601
486
  case AtomicExpr::AO__atomic_fetch_add:
602
486
    Op = llvm::AtomicRMWInst::Add;
603
486
    break;
604
486
605
486
  case AtomicExpr::AO__atomic_sub_fetch:
606
0
    PostOp = llvm::Instruction::Sub;
607
0
    LLVM_FALLTHROUGH;
608
383
  case AtomicExpr::AO__c11_atomic_fetch_sub:
609
383
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
610
383
  case AtomicExpr::AO__atomic_fetch_sub:
611
383
    Op = llvm::AtomicRMWInst::Sub;
612
383
    break;
613
383
614
383
  case AtomicExpr::AO__atomic_min_fetch:
615
8
    PostOpMinMax = true;
616
8
    LLVM_FALLTHROUGH;
617
24
  case AtomicExpr::AO__c11_atomic_fetch_min:
618
24
  case AtomicExpr::AO__opencl_atomic_fetch_min:
619
24
  case AtomicExpr::AO__atomic_fetch_min:
620
24
    Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min
621
24
                                                  : llvm::AtomicRMWInst::UMin;
622
24
    break;
623
24
624
24
  case AtomicExpr::AO__atomic_max_fetch:
625
4
    PostOpMinMax = true;
626
4
    LLVM_FALLTHROUGH;
627
20
  case AtomicExpr::AO__c11_atomic_fetch_max:
628
20
  case AtomicExpr::AO__opencl_atomic_fetch_max:
629
20
  case AtomicExpr::AO__atomic_fetch_max:
630
20
    Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max
631
20
                                                  : llvm::AtomicRMWInst::UMax;
632
20
    break;
633
20
634
20
  case AtomicExpr::AO__atomic_and_fetch:
635
0
    PostOp = llvm::Instruction::And;
636
0
    LLVM_FALLTHROUGH;
637
6
  case AtomicExpr::AO__c11_atomic_fetch_and:
638
6
  case AtomicExpr::AO__opencl_atomic_fetch_and:
639
6
  case AtomicExpr::AO__atomic_fetch_and:
640
6
    Op = llvm::AtomicRMWInst::And;
641
6
    break;
642
6
643
6
  case AtomicExpr::AO__atomic_or_fetch:
644
2
    PostOp = llvm::Instruction::Or;
645
2
    LLVM_FALLTHROUGH;
646
7
  case AtomicExpr::AO__c11_atomic_fetch_or:
647
7
  case AtomicExpr::AO__opencl_atomic_fetch_or:
648
7
  case AtomicExpr::AO__atomic_fetch_or:
649
7
    Op = llvm::AtomicRMWInst::Or;
650
7
    break;
651
7
652
7
  case AtomicExpr::AO__atomic_xor_fetch:
653
0
    PostOp = llvm::Instruction::Xor;
654
0
    LLVM_FALLTHROUGH;
655
4
  case AtomicExpr::AO__c11_atomic_fetch_xor:
656
4
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
657
4
  case AtomicExpr::AO__atomic_fetch_xor:
658
4
    Op = llvm::AtomicRMWInst::Xor;
659
4
    break;
660
4
661
4
  case AtomicExpr::AO__atomic_nand_fetch:
662
4
    PostOp = llvm::Instruction::And; // the NOT is special cased below
663
4
    LLVM_FALLTHROUGH;
664
8
  case AtomicExpr::AO__atomic_fetch_nand:
665
8
    Op = llvm::AtomicRMWInst::Nand;
666
8
    break;
667
963
  }
668
963
669
963
  llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
670
963
  llvm::AtomicRMWInst *RMWI =
671
963
      CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order, Scope);
672
963
  RMWI->setVolatile(E->isVolatile());
673
963
674
963
  // For __atomic_*_fetch operations, perform the operation again to
675
963
  // determine the value which was written.
676
963
  llvm::Value *Result = RMWI;
677
963
  if (PostOpMinMax)
678
12
    Result = EmitPostAtomicMinMax(CGF.Builder, E->getOp(),
679
12
                                  E->getValueType()->isSignedIntegerType(),
680
12
                                  RMWI, LoadVal1);
681
951
  else if (PostOp)
682
75
    Result = CGF.Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp, RMWI,
683
75
                                     LoadVal1);
684
963
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
685
4
    Result = CGF.Builder.CreateNot(Result);
686
963
  CGF.Builder.CreateStore(Result, Dest);
687
963
}
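One concrete case of the post-op handling above: __atomic_nand_fetch is defined to return ~(old & val), so the result of the atomicrmw nand (the old value) is ANDed with the operand again and then inverted with CreateNot before being stored to Dest. A hedged example with an invented function name:

unsigned char nand_fetch(unsigned char *p, unsigned char v) {
  return __atomic_nand_fetch(p, v, __ATOMIC_SEQ_CST);
}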
688
689
// This function emits any expression (scalar, complex, or aggregate)
690
// into a temporary alloca.
691
static Address
692
624
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
693
624
  Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
694
624
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
695
624
                       /*Init*/ true);
696
624
  return DeclPtr;
697
624
}
698
699
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
700
                         Address Ptr, Address Val1, Address Val2,
701
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
702
                         uint64_t Size, llvm::AtomicOrdering Order,
703
1.72k
                         llvm::Value *Scope) {
704
1.72k
  auto ScopeModel = Expr->getScopeModel();
705
1.72k
706
1.72k
  // LLVM atomic instructions always have synch scope. If clang atomic
707
1.72k
  // expression has no scope operand, use default LLVM synch scope.
708
1.72k
  if (!ScopeModel) {
709
1.66k
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
710
1.66k
                 Order, CGF.CGM.getLLVMContext().getOrInsertSyncScopeID(""));
711
1.66k
    return;
712
1.66k
  }
713
58
714
58
  // Handle constant scope.
715
58
  if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
716
50
    auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
717
50
        CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),
718
50
        Order, CGF.CGM.getLLVMContext());
719
50
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
720
50
                 Order, SCID);
721
50
    return;
722
50
  }
723
8
724
8
  // Handle non-constant scope.
725
8
  auto &Builder = CGF.Builder;
726
8
  auto Scopes = ScopeModel->getRuntimeValues();
727
8
  llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
728
8
  for (auto S : Scopes)
729
32
    BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);
730
8
731
8
  llvm::BasicBlock *ContBB =
732
8
      CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);
733
8
734
8
  auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
735
8
  // If unsupported synch scope is encountered at run time, assume a fallback
736
8
  // synch scope value.
737
8
  auto FallBack = ScopeModel->getFallBackValue();
738
8
  llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
739
32
  for (auto S : Scopes) {
740
32
    auto *B = BB[S];
741
32
    if (S != FallBack)
742
24
      SI->addCase(Builder.getInt32(S), B);
743
32
744
32
    Builder.SetInsertPoint(B);
745
32
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
746
32
                 Order,
747
32
                 CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
748
32
                                                         ScopeModel->map(S),
749
32
                                                         Order,
750
32
                                                         CGF.getLLVMContext()));
751
32
    Builder.CreateBr(ContBB);
752
32
  }
753
8
754
8
  Builder.SetInsertPoint(ContBB);
755
8
}
756
757
static void
758
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
759
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
760
133
                  SourceLocation Loc, CharUnits SizeInChars) {
761
133
  if (UseOptimizedLibcall) {
762
81
    // Load value and pass it to the function directly.
763
81
    CharUnits Align = CGF.getContext().getTypeAlignInChars(ValTy);
764
81
    int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
765
81
    ValTy =
766
81
        CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
767
81
    llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
768
81
                                                SizeInBits)->getPointerTo();
769
81
    Address Ptr = Address(CGF.Builder.CreateBitCast(Val, IPtrTy), Align);
770
81
    Val = CGF.EmitLoadOfScalar(Ptr, false,
771
81
                               CGF.getContext().getPointerType(ValTy),
772
81
                               Loc);
773
81
    // Coerce the value into an appropriately sized integer type.
774
81
    Args.add(RValue::get(Val), ValTy);
775
81
  } else {
776
52
    // Non-optimized functions always take a reference.
777
52
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
778
52
                         CGF.getContext().VoidPtrTy);
779
52
  }
780
133
}
781
782
1.00k
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
783
1.00k
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
784
1.00k
  QualType MemTy = AtomicTy;
785
1.00k
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
786
615
    MemTy = AT->getValueType();
787
1.00k
  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;
788
1.00k
789
1.00k
  Address Val1 = Address::invalid();
790
1.00k
  Address Val2 = Address::invalid();
791
1.00k
  Address Dest = Address::invalid();
792
1.00k
  Address Ptr = EmitPointerWithAlignment(E->getPtr());
793
1.00k
794
1.00k
  if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
795
1.00k
      E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
796
16
    LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
797
16
    EmitAtomicInit(E->getVal1(), lvalue);
798
16
    return RValue::get(nullptr);
799
16
  }
800
989
801
989
  CharUnits sizeChars, alignChars;
802
989
  std::tie(sizeChars, alignChars) = getContext().getTypeInfoInChars(AtomicTy);
803
989
  uint64_t Size = sizeChars.getQuantity();
804
989
  unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
805
989
806
989
  bool Oversized = getContext().toBits(sizeChars) > MaxInlineWidthInBits;
807
989
  bool Misaligned = (Ptr.getAlignment() % sizeChars) != 0;
808
989
  bool UseLibcall = Misaligned | Oversized;
809
989
810
989
  if (UseLibcall) {
811
181
    CGM.getDiags().Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
812
181
        << !Oversized;
813
181
  }
814
989
815
989
  llvm::Value *Order = EmitScalarExpr(E->getOrder());
816
989
  llvm::Value *Scope =
817
989
      E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;
818
989
819
989
  switch (E->getOp()) {
820
0
  case AtomicExpr::AO__c11_atomic_init:
821
0
  case AtomicExpr::AO__opencl_atomic_init:
822
0
    llvm_unreachable("Already handled above with EmitAtomicInit!");
823
0
824
153
  case AtomicExpr::AO__c11_atomic_load:
825
153
  case AtomicExpr::AO__opencl_atomic_load:
826
153
  case AtomicExpr::AO__atomic_load_n:
827
153
    break;
828
153
829
153
  case AtomicExpr::AO__atomic_load:
830
86
    Dest = EmitPointerWithAlignment(E->getVal1());
831
86
    break;
832
153
833
153
  case AtomicExpr::AO__atomic_store:
834
84
    Val1 = EmitPointerWithAlignment(E->getVal1());
835
84
    break;
836
153
837
153
  case AtomicExpr::AO__atomic_exchange:
838
14
    Val1 = EmitPointerWithAlignment(E->getVal1());
839
14
    Dest = EmitPointerWithAlignment(E->getVal2());
840
14
    break;
841
153
842
153
  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
843
69
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
844
69
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
845
69
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
846
69
  case AtomicExpr::AO__atomic_compare_exchange_n:
847
69
  case AtomicExpr::AO__atomic_compare_exchange:
848
69
    Val1 = EmitPointerWithAlignment(E->getVal1());
849
69
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
850
22
      Val2 = EmitPointerWithAlignment(E->getVal2());
851
47
    else
852
47
      Val2 = EmitValToTemp(*this, E->getVal2());
853
69
    OrderFail = EmitScalarExpr(E->getOrderFail());
854
69
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
855
69
        E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
856
30
      IsWeak = EmitScalarExpr(E->getWeak());
857
69
    break;
858
69
859
229
  case AtomicExpr::AO__c11_atomic_fetch_add:
860
229
  case AtomicExpr::AO__c11_atomic_fetch_sub:
861
229
  case AtomicExpr::AO__opencl_atomic_fetch_add:
862
229
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
863
229
    if (MemTy->isPointerType()) {
864
6
      // For pointer arithmetic, we're required to do a bit of math:
865
6
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
866
6
      // ... but only for the C11 builtins. The GNU builtins expect the
867
6
      // user to multiply by sizeof(T).
868
6
      QualType Val1Ty = E->getVal1()->getType();
869
6
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
870
6
      CharUnits PointeeIncAmt =
871
6
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
872
6
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
873
6
      auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
874
6
      Val1 = Temp;
875
6
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
876
6
      break;
877
6
    }
878
223
      LLVM_FALLTHROUGH;
879
577
  case AtomicExpr::AO__atomic_fetch_add:
880
577
  case AtomicExpr::AO__atomic_fetch_sub:
881
577
  case AtomicExpr::AO__atomic_add_fetch:
882
577
  case AtomicExpr::AO__atomic_sub_fetch:
883
577
  case AtomicExpr::AO__c11_atomic_store:
884
577
  case AtomicExpr::AO__c11_atomic_exchange:
885
577
  case AtomicExpr::AO__opencl_atomic_store:
886
577
  case AtomicExpr::AO__opencl_atomic_exchange:
887
577
  case AtomicExpr::AO__atomic_store_n:
888
577
  case AtomicExpr::AO__atomic_exchange_n:
889
577
  case AtomicExpr::AO__c11_atomic_fetch_and:
890
577
  case AtomicExpr::AO__c11_atomic_fetch_or:
891
577
  case AtomicExpr::AO__c11_atomic_fetch_xor:
892
577
  case AtomicExpr::AO__c11_atomic_fetch_max:
893
577
  case AtomicExpr::AO__c11_atomic_fetch_min:
894
577
  case AtomicExpr::AO__opencl_atomic_fetch_and:
895
577
  case AtomicExpr::AO__opencl_atomic_fetch_or:
896
577
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
897
577
  case AtomicExpr::AO__opencl_atomic_fetch_min:
898
577
  case AtomicExpr::AO__opencl_atomic_fetch_max:
899
577
  case AtomicExpr::AO__atomic_fetch_and:
900
577
  case AtomicExpr::AO__atomic_fetch_or:
901
577
  case AtomicExpr::AO__atomic_fetch_xor:
902
577
  case AtomicExpr::AO__atomic_fetch_nand:
903
577
  case AtomicExpr::AO__atomic_and_fetch:
904
577
  case AtomicExpr::AO__atomic_or_fetch:
905
577
  case AtomicExpr::AO__atomic_xor_fetch:
906
577
  case AtomicExpr::AO__atomic_nand_fetch:
907
577
  case AtomicExpr::AO__atomic_max_fetch:
908
577
  case AtomicExpr::AO__atomic_min_fetch:
909
577
  case AtomicExpr::AO__atomic_fetch_max:
910
577
  case AtomicExpr::AO__atomic_fetch_min:
911
577
    Val1 = EmitValToTemp(*this, E->getVal1());
912
577
    break;
913
989
  }
914
989
915
989
  QualType RValTy = E->getType().getUnqualifiedType();
916
989
917
989
  // The inlined atomics only function on iN types, where N is a power of 2. We
918
989
  // need to make sure (via temporaries if necessary) that all incoming values
919
989
  // are compatible.
920
989
  LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
921
989
  AtomicInfo Atomics(*this, AtomicVal);
922
989
923
989
  Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
924
989
  if (Val1.isValid()) Val1 = Atomics.convertToAtomicIntPointer(Val1);
925
989
  if (Val2.isValid()) Val2 = Atomics.convertToAtomicIntPointer(Val2);
926
989
  if (Dest.isValid())
927
100
    Dest = Atomics.emitCastToAtomicIntPointer(Dest);
928
889
  else if (E->isCmpXChg())
929
69
    Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
930
820
  else if (!RValTy->isVoidType())
931
595
    Dest = Atomics.emitCastToAtomicIntPointer(Atomics.CreateTempAlloca());
932
989
933
989
  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
934
989
  if (UseLibcall) {
935
181
    bool UseOptimizedLibcall = false;
936
181
    switch (E->getOp()) {
937
0
    case AtomicExpr::AO__c11_atomic_init:
938
0
    case AtomicExpr::AO__opencl_atomic_init:
939
0
      llvm_unreachable("Already handled above with EmitAtomicInit!");
940
0
941
40
    case AtomicExpr::AO__c11_atomic_fetch_add:
942
40
    case AtomicExpr::AO__opencl_atomic_fetch_add:
943
40
    case AtomicExpr::AO__atomic_fetch_add:
944
40
    case AtomicExpr::AO__c11_atomic_fetch_and:
945
40
    case AtomicExpr::AO__opencl_atomic_fetch_and:
946
40
    case AtomicExpr::AO__atomic_fetch_and:
947
40
    case AtomicExpr::AO__c11_atomic_fetch_or:
948
40
    case AtomicExpr::AO__opencl_atomic_fetch_or:
949
40
    case AtomicExpr::AO__atomic_fetch_or:
950
40
    case AtomicExpr::AO__atomic_fetch_nand:
951
40
    case AtomicExpr::AO__c11_atomic_fetch_sub:
952
40
    case AtomicExpr::AO__opencl_atomic_fetch_sub:
953
40
    case AtomicExpr::AO__atomic_fetch_sub:
954
40
    case AtomicExpr::AO__c11_atomic_fetch_xor:
955
40
    case AtomicExpr::AO__opencl_atomic_fetch_xor:
956
40
    case AtomicExpr::AO__opencl_atomic_fetch_min:
957
40
    case AtomicExpr::AO__opencl_atomic_fetch_max:
958
40
    case AtomicExpr::AO__atomic_fetch_xor:
959
40
    case AtomicExpr::AO__c11_atomic_fetch_max:
960
40
    case AtomicExpr::AO__c11_atomic_fetch_min:
961
40
    case AtomicExpr::AO__atomic_add_fetch:
962
40
    case AtomicExpr::AO__atomic_and_fetch:
963
40
    case AtomicExpr::AO__atomic_nand_fetch:
964
40
    case AtomicExpr::AO__atomic_or_fetch:
965
40
    case AtomicExpr::AO__atomic_sub_fetch:
966
40
    case AtomicExpr::AO__atomic_xor_fetch:
967
40
    case AtomicExpr::AO__atomic_fetch_max:
968
40
    case AtomicExpr::AO__atomic_fetch_min:
969
40
    case AtomicExpr::AO__atomic_max_fetch:
970
40
    case AtomicExpr::AO__atomic_min_fetch:
971
40
      // For these, only library calls for certain sizes exist.
972
40
      UseOptimizedLibcall = true;
973
40
      break;
974
40
975
87
    case AtomicExpr::AO__atomic_load:
976
87
    case AtomicExpr::AO__atomic_store:
977
87
    case AtomicExpr::AO__atomic_exchange:
978
87
    case AtomicExpr::AO__atomic_compare_exchange:
979
87
      // Use the generic version if we don't know that the operand will be
980
87
      // suitably aligned for the optimized version.
981
87
      if (Misaligned)
982
62
        break;
983
25
      LLVM_FALLTHROUGH;
984
79
    case AtomicExpr::AO__c11_atomic_load:
985
79
    case AtomicExpr::AO__c11_atomic_store:
986
79
    case AtomicExpr::AO__c11_atomic_exchange:
987
79
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
988
79
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
989
79
    case AtomicExpr::AO__opencl_atomic_load:
990
79
    case AtomicExpr::AO__opencl_atomic_store:
991
79
    case AtomicExpr::AO__opencl_atomic_exchange:
992
79
    case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
993
79
    case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
994
79
    case AtomicExpr::AO__atomic_load_n:
995
79
    case AtomicExpr::AO__atomic_store_n:
996
79
    case AtomicExpr::AO__atomic_exchange_n:
997
79
    case AtomicExpr::AO__atomic_compare_exchange_n:
998
79
      // Only use optimized library calls for sizes for which they exist.
999
79
      // FIXME: Size == 16 optimized library functions exist too.
1000
79
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
1001
63
        UseOptimizedLibcall = true;
1002
79
      break;
1003
181
    }
1004
181
1005
181
    CallArgList Args;
1006
181
    if (!UseOptimizedLibcall) {
1007
78
      // For non-optimized library calls, the size is the first parameter
1008
78
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
1009
78
               getContext().getSizeType());
1010
78
    }
1011
181
    // Atomic address is the first or second parameter
1012
181
    // The OpenCL atomic library functions only accept pointer arguments to
1013
181
    // generic address space.
1014
206
    auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
1015
206
      if (!E->isOpenCL())
1016
168
        return V;
1017
38
      auto AS = PT->castAs<PointerType>()->getPointeeType().getAddressSpace();
1018
38
      if (AS == LangAS::opencl_generic)
1019
21
        return V;
1020
17
      auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
1021
17
      auto T = V->getType();
1022
17
      auto *DestType = T->getPointerElementType()->getPointerTo(DestAS);
1023
17
1024
17
      return getTargetHooks().performAddrSpaceCast(
1025
17
          *this, V, AS, LangAS::opencl_generic, DestType, false);
1026
17
    };
1027
181
1028
181
    Args.add(RValue::get(CastToGenericAddrSpace(
1029
181
                 EmitCastToVoidPtr(Ptr.getPointer()), E->getPtr()->getType())),
1030
181
             getContext().VoidPtrTy);
1031
181
1032
181
    std::string LibCallName;
1033
181
    QualType LoweredMemTy =
1034
181
      MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
1035
181
    QualType RetTy;
1036
181
    bool HaveRetTy = false;
1037
181
    llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
1038
181
    bool PostOpMinMax = false;
1039
181
    switch (E->getOp()) {
1040
0
    case AtomicExpr::AO__c11_atomic_init:
1041
0
    case AtomicExpr::AO__opencl_atomic_init:
1042
0
      llvm_unreachable("Already handled!");
1043
0
1044
0
    // There is only one libcall for compare and exchange, because there is no
1045
0
    // optimisation benefit possible from a libcall version of a weak compare
1046
0
    // and exchange.
1047
0
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
1048
0
    //                                void *desired, int success, int failure)
1049
0
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
1050
0
    //                                  int success, int failure)
1051
25
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
1052
25
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
1053
25
    case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
1054
25
    case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
1055
25
    case AtomicExpr::AO__atomic_compare_exchange:
1056
25
    case AtomicExpr::AO__atomic_compare_exchange_n:
1057
25
      LibCallName = "__atomic_compare_exchange";
1058
25
      RetTy = getContext().BoolTy;
1059
25
      HaveRetTy = true;
1060
25
      Args.add(
1061
25
          RValue::get(CastToGenericAddrSpace(
1062
25
              EmitCastToVoidPtr(Val1.getPointer()), E->getVal1()->getType())),
1063
25
          getContext().VoidPtrTy);
1064
25
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
1065
25
                        MemTy, E->getExprLoc(), sizeChars);
1066
25
      Args.add(RValue::get(Order), getContext().IntTy);
1067
25
      Order = OrderFail;
1068
25
      break;
1069
25
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
1070
25
    //                        int order)
1071
25
    // T __atomic_exchange_N(T *mem, T val, int order)
1072
25
    case AtomicExpr::AO__c11_atomic_exchange:
1073
13
    case AtomicExpr::AO__opencl_atomic_exchange:
1074
13
    case AtomicExpr::AO__atomic_exchange_n:
1075
13
    case AtomicExpr::AO__atomic_exchange:
1076
13
      LibCallName = "__atomic_exchange";
1077
13
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1078
13
                        MemTy, E->getExprLoc(), sizeChars);
1079
13
      break;
1080
13
    // void __atomic_store(size_t size, void *mem, void *val, int order)
1081
13
    // void __atomic_store_N(T *mem, T val, int order)
1082
55
    case AtomicExpr::AO__c11_atomic_store:
1083
55
    case AtomicExpr::AO__opencl_atomic_store:
1084
55
    case AtomicExpr::AO__atomic_store:
1085
55
    case AtomicExpr::AO__atomic_store_n:
1086
55
      LibCallName = "__atomic_store";
1087
55
      RetTy = getContext().VoidTy;
1088
55
      HaveRetTy = true;
1089
55
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1090
55
                        MemTy, E->getExprLoc(), sizeChars);
1091
55
      break;
1092
55
    // void __atomic_load(size_t size, void *mem, void *return, int order)
1093
55
    // T __atomic_load_N(T *mem, int order)
1094
55
    case AtomicExpr::AO__c11_atomic_load:
1095
48
    case AtomicExpr::AO__opencl_atomic_load:
1096
48
    case AtomicExpr::AO__atomic_load:
1097
48
    case AtomicExpr::AO__atomic_load_n:
1098
48
      LibCallName = "__atomic_load";
1099
48
      break;
1100
48
    // T __atomic_add_fetch_N(T *mem, T val, int order)
1101
48
    // T __atomic_fetch_add_N(T *mem, T val, int order)
1102
48
    case AtomicExpr::AO__atomic_add_fetch:
1103
1
      PostOp = llvm::Instruction::Add;
1104
1
      LLVM_FALLTHROUGH;
1105
17
    case AtomicExpr::AO__c11_atomic_fetch_add:
1106
17
    case AtomicExpr::AO__opencl_atomic_fetch_add:
1107
17
    case AtomicExpr::AO__atomic_fetch_add:
1108
17
      LibCallName = "__atomic_fetch_add";
1109
17
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1110
17
                        LoweredMemTy, E->getExprLoc(), sizeChars);
1111
17
      break;
1112
17
    // T __atomic_and_fetch_N(T *mem, T val, int order)
1113
17
    // T __atomic_fetch_and_N(T *mem, T val, int order)
1114
17
    case AtomicExpr::AO__atomic_and_fetch:
1115
1
      PostOp = llvm::Instruction::And;
1116
1
      LLVM_FALLTHROUGH;
1117
2
    case AtomicExpr::AO__c11_atomic_fetch_and:
1118
2
    case AtomicExpr::AO__opencl_atomic_fetch_and:
1119
2
    case AtomicExpr::AO__atomic_fetch_and:
1120
2
      LibCallName = "__atomic_fetch_and";
1121
2
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1122
2
                        MemTy, E->getExprLoc(), sizeChars);
1123
2
      break;
1124
2
    // T __atomic_or_fetch_N(T *mem, T val, int order)
1125
2
    // T __atomic_fetch_or_N(T *mem, T val, int order)
1126
2
    case AtomicExpr::AO__atomic_or_fetch:
1127
1
      PostOp = llvm::Instruction::Or;
1128
1
      LLVM_FALLTHROUGH;
1129
2
    case AtomicExpr::AO__c11_atomic_fetch_or:
1130
2
    case AtomicExpr::AO__opencl_atomic_fetch_or:
1131
2
    case AtomicExpr::AO__atomic_fetch_or:
1132
2
      LibCallName = "__atomic_fetch_or";
1133
2
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1134
2
                        MemTy, E->getExprLoc(), sizeChars);
1135
2
      break;
1136
2
    // T __atomic_sub_fetch_N(T *mem, T val, int order)
1137
2
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
1138
2
    case AtomicExpr::AO__atomic_sub_fetch:
1139
1
      PostOp = llvm::Instruction::Sub;
1140
1
      LLVM_FALLTHROUGH;
1141
9
    case AtomicExpr::AO__c11_atomic_fetch_sub:
1142
9
    case AtomicExpr::AO__opencl_atomic_fetch_sub:
1143
9
    case AtomicExpr::AO__atomic_fetch_sub:
1144
9
      LibCallName = "__atomic_fetch_sub";
1145
9
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1146
9
                        LoweredMemTy, E->getExprLoc(), sizeChars);
1147
9
      break;
1148
9
    // T __atomic_xor_fetch_N(T *mem, T val, int order)
1149
9
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
1150
9
    case AtomicExpr::AO__atomic_xor_fetch:
1151
1
      PostOp = llvm::Instruction::Xor;
1152
1
      LLVM_FALLTHROUGH;
1153
2
    case AtomicExpr::AO__c11_atomic_fetch_xor:
1154
2
    case AtomicExpr::AO__opencl_atomic_fetch_xor:
1155
2
    case AtomicExpr::AO__atomic_fetch_xor:
1156
2
      LibCallName = "__atomic_fetch_xor";
1157
2
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1158
2
                        MemTy, E->getExprLoc(), sizeChars);
1159
2
      break;
1160
2
    case AtomicExpr::AO__atomic_min_fetch:
1161
2
      PostOpMinMax = true;
1162
2
      LLVM_FALLTHROUGH;
1163
6
    case AtomicExpr::AO__c11_atomic_fetch_min:
1164
6
    case AtomicExpr::AO__atomic_fetch_min:
1165
6
    case AtomicExpr::AO__opencl_atomic_fetch_min:
1166
6
      LibCallName = E->getValueType()->isSignedIntegerType()
1167
6
                        ? "__atomic_fetch_min"
1168
6
                        : "__atomic_fetch_umin";
1169
6
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1170
6
                        LoweredMemTy, E->getExprLoc(), sizeChars);
1171
6
      break;
1172
6
    case AtomicExpr::AO__atomic_max_fetch:
1173
0
      PostOpMinMax = true;
1174
0
      LLVM_FALLTHROUGH;
1175
0
    case AtomicExpr::AO__c11_atomic_fetch_max:
1176
0
    case AtomicExpr::AO__atomic_fetch_max:
1177
0
    case AtomicExpr::AO__opencl_atomic_fetch_max:
1178
0
      LibCallName = E->getValueType()->isSignedIntegerType()
1179
0
                        ? "__atomic_fetch_max"
1180
0
                        : "__atomic_fetch_umax";
1181
0
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1182
0
                        LoweredMemTy, E->getExprLoc(), sizeChars);
1183
0
      break;
1184
0
    // T __atomic_nand_fetch_N(T *mem, T val, int order)
1185
0
    // T __atomic_fetch_nand_N(T *mem, T val, int order)
1186
1
    case AtomicExpr::AO__atomic_nand_fetch:
1187
1
      PostOp = llvm::Instruction::And; // the NOT is special cased below
1188
1
      LLVM_FALLTHROUGH;
1189
2
    case AtomicExpr::AO__atomic_fetch_nand:
1190
2
      LibCallName = "__atomic_fetch_nand";
1191
2
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1192
2
                        MemTy, E->getExprLoc(), sizeChars);
1193
2
      break;
1194
181
    }
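The PostOp/PostOpMinMax flags set in the cases above record that the source-level builtin wants the new value, while the runtime entry points only hand back the old one. A minimal sketch of that fix-up, assuming the GNU __atomic builtins (illustrative only, not code from this file):

unsigned and_fetch_via_fetch_and(unsigned *p, unsigned mask) {
  // The runtime call returns the value the object held before the update...
  unsigned old = __atomic_fetch_and(p, mask, __ATOMIC_SEQ_CST);
  // ...so an and_fetch-style builtin re-applies the operation afterwards,
  // exactly as the PostOp handling below does with CreateBinOp
  // (nand additionally negates the result).
  return old & mask;
}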
1195
181
1196
181
    if (E->isOpenCL()) {
1197
27
      LibCallName = std::string("__opencl") +
1198
27
          StringRef(LibCallName).drop_front(1).str();
1199
27
1200
27
    }
1201
181
    // Optimized functions have the size in their name.
1202
181
    if (UseOptimizedLibcall)
1203
103
      LibCallName += "_" + llvm::utostr(Size);
1204
181
    // By default, assume we return a value of the atomic type.
1205
181
    if (!HaveRetTy) {
1206
101
      if (UseOptimizedLibcall) {
1207
63
        // Value is returned directly.
1208
63
        // The function returns an appropriately sized integer type.
1209
63
        RetTy = getContext().getIntTypeForBitwidth(
1210
63
            getContext().toBits(sizeChars), /*Signed=*/false);
1211
63
      } else {
1212
38
        // Value is returned through parameter before the order.
1213
38
        RetTy = getContext().VoidTy;
1214
38
        Args.add(RValue::get(EmitCastToVoidPtr(Dest.getPointer())),
1215
38
                 getContext().VoidPtrTy);
1216
38
      }
1217
101
    }
1218
181
    // order is always the last parameter
1219
181
    Args.add(RValue::get(Order),
1220
181
             getContext().IntTy);
1221
181
    if (E->isOpenCL())
1222
27
      Args.add(RValue::get(Scope), getContext().IntTy);
1223
181
1224
181
    // PostOp is only needed for the atomic_*_fetch operations, and
1225
181
    // thus is only needed for and implemented in the
1226
181
    // UseOptimizedLibcall codepath.
1227
181
    assert(UseOptimizedLibcall || (!PostOp && !PostOpMinMax));
1228
181
1229
181
    RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
1230
181
    // The value is returned directly from the libcall.
1231
181
    if (E->isCmpXChg())
1232
25
      return Res;
1233
156
1234
156
    // The value is returned directly for optimized libcalls but the expr
1235
156
    // provided an out-param.
1236
156
    if (UseOptimizedLibcall && Res.getScalarVal()) {
1237
63
      llvm::Value *ResVal = Res.getScalarVal();
1238
63
      if (PostOpMinMax) {
1239
2
        llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
1240
2
        ResVal = EmitPostAtomicMinMax(Builder, E->getOp(),
1241
2
                                      E->getValueType()->isSignedIntegerType(),
1242
2
                                      ResVal, LoadVal1);
1243
61
      } else if (PostOp) {
1244
6
        llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
1245
6
        ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
1246
6
      }
1247
63
      if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
1248
1
        ResVal = Builder.CreateNot(ResVal);
1249
63
1250
63
      Builder.CreateStore(
1251
63
          ResVal,
1252
63
          Builder.CreateBitCast(Dest, ResVal->getType()->getPointerTo()));
1253
63
    }
1254
156
1255
156
    if (RValTy->isVoidType())
1256
98
      return RValue::get(nullptr);
1257
58
1258
58
    return convertTempToRValue(
1259
58
        Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
1260
58
        RValTy, E->getExprLoc());
1261
58
  }
1262
808
1263
808
  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
1264
808
                 E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
1265
808
                 E->getOp() == AtomicExpr::AO__atomic_store ||
1266
808
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
1267
808
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
1268
808
                E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
1269
808
                E->getOp() == AtomicExpr::AO__atomic_load ||
1270
808
                E->getOp() == AtomicExpr::AO__atomic_load_n;
1271
808
1272
808
  if (isa<llvm::ConstantInt>(Order)) {
1273
496
    auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
1274
496
    // We should not ever get to a case where the ordering isn't a valid C ABI
1275
496
    // value, but it's hard to enforce that in general.
1276
496
    if (llvm::isValidAtomicOrderingCABI(ord))
1277
496
      switch ((llvm::AtomicOrderingCABI)ord) {
1278
56
      case llvm::AtomicOrderingCABI::relaxed:
1279
56
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1280
56
                     llvm::AtomicOrdering::Monotonic, Scope);
1281
56
        break;
1282
42
      case llvm::AtomicOrderingCABI::consume:
1283
42
      case llvm::AtomicOrderingCABI::acquire:
1284
42
        if (IsStore)
1285
0
          break; // Avoid crashing on code with undefined behavior
1286
42
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1287
42
                     llvm::AtomicOrdering::Acquire, Scope);
1288
42
        break;
1289
42
      case llvm::AtomicOrderingCABI::release:
1290
20
        if (IsLoad)
1291
0
          break; // Avoid crashing on code with undefined behavior
1292
20
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1293
20
                     llvm::AtomicOrdering::Release, Scope);
1294
20
        break;
1295
60
      case llvm::AtomicOrderingCABI::acq_rel:
1296
60
        if (IsLoad || IsStore)
1297
0
          break; // Avoid crashing on code with undefined behavior
1298
60
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1299
60
                     llvm::AtomicOrdering::AcquireRelease, Scope);
1300
60
        break;
1301
318
      case llvm::AtomicOrderingCABI::seq_cst:
1302
318
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1303
318
                     llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1304
318
        break;
1305
496
      }
1306
496
    if (RValTy->isVoidType())
1307
140
      return RValue::get(nullptr);
1308
356
1309
356
    return convertTempToRValue(
1310
356
        Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
1311
356
                                        Dest.getAddressSpace())),
1312
356
        RValTy, E->getExprLoc());
1313
356
  }
1314
312
1315
312
  // Long case, when Order isn't obviously constant.
1316
312
1317
312
  // Create all the relevant BB's
1318
312
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
1319
312
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
1320
312
                   *SeqCstBB = nullptr;
1321
312
  MonotonicBB = createBasicBlock("monotonic", CurFn);
1322
312
  if (!IsStore)
1323
225
    AcquireBB = createBasicBlock("acquire", CurFn);
1324
312
  if (!IsLoad)
1325
233
    ReleaseBB = createBasicBlock("release", CurFn);
1326
312
  if (!IsLoad && !IsStore)
1327
146
    AcqRelBB = createBasicBlock("acqrel", CurFn);
1328
312
  SeqCstBB = createBasicBlock("seqcst", CurFn);
1329
312
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
1330
312
1331
312
  // Create the switch for the split
1332
312
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
1333
312
  // doesn't matter unless someone is crazy enough to use something that
1334
312
  // doesn't fold to a constant for the ordering.
1335
312
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
1336
312
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
1337
312
1338
312
  // Emit all the different atomics
1339
312
  Builder.SetInsertPoint(MonotonicBB);
1340
312
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1341
312
               llvm::AtomicOrdering::Monotonic, Scope);
1342
312
  Builder.CreateBr(ContBB);
1343
312
  if (!IsStore) {
1344
225
    Builder.SetInsertPoint(AcquireBB);
1345
225
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1346
225
                 llvm::AtomicOrdering::Acquire, Scope);
1347
225
    Builder.CreateBr(ContBB);
1348
225
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
1349
225
                AcquireBB);
1350
225
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
1351
225
                AcquireBB);
1352
225
  }
1353
312
  if (!IsLoad) {
1354
233
    Builder.SetInsertPoint(ReleaseBB);
1355
233
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1356
233
                 llvm::AtomicOrdering::Release, Scope);
1357
233
    Builder.CreateBr(ContBB);
1358
233
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
1359
233
                ReleaseBB);
1360
233
  }
1361
312
  if (!IsLoad && !IsStore) {
1362
146
    Builder.SetInsertPoint(AcqRelBB);
1363
146
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1364
146
                 llvm::AtomicOrdering::AcquireRelease, Scope);
1365
146
    Builder.CreateBr(ContBB);
1366
146
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
1367
146
                AcqRelBB);
1368
146
  }
1369
312
  Builder.SetInsertPoint(SeqCstBB);
1370
312
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1371
312
               llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1372
312
  Builder.CreateBr(ContBB);
1373
312
  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
1374
312
              SeqCstBB);
1375
312
1376
312
  // Cleanup and return
1377
312
  Builder.SetInsertPoint(ContBB);
1378
312
  if (RValTy->isVoidType())
1379
87
    return RValue::get(nullptr);
1380
225
1381
225
  assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
1382
225
  return convertTempToRValue(
1383
225
      Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
1384
225
                                      Dest.getAddressSpace())),
1385
225
      RValTy, E->getExprLoc());
1386
225
}
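The block-and-switch machinery above exists only because the memory-order operand may be a runtime value. A minimal example of source that takes this path, assuming the GNU __atomic builtins (illustrative, not taken from this file):

int load_with_dynamic_order(int *p, int order) {
  // 'order' is not a compile-time constant, so CodeGen cannot pick a single
  // llvm::AtomicOrdering; it emits the monotonic/acquire/release/acqrel/
  // seqcst blocks above and switches on the runtime value instead.
  return __atomic_load_n(p, order);
}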
1387
1388
3.45k
Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
1389
3.45k
  unsigned addrspace =
1390
3.45k
    cast<llvm::PointerType>(addr.getPointer()->getType())->getAddressSpace();
1391
3.45k
  llvm::IntegerType *ty =
1392
3.45k
    llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
1393
3.45k
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
1394
3.45k
}
1395
1396
819
Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
1397
819
  llvm::Type *Ty = Addr.getElementType();
1398
819
  uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
1399
819
  if (SourceSizeInBits != AtomicSizeInBits) {
1400
9
    Address Tmp = CreateTempAlloca();
1401
9
    CGF.Builder.CreateMemCpy(Tmp, Addr,
1402
9
                             std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
1403
9
    Addr = Tmp;
1404
9
  }
1405
819
1406
819
  return emitCastToAtomicIntPointer(Addr);
1407
819
}
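Both helpers above reduce an arbitrary atomic object to an integer of AtomicSizeInBits bits so later code can use plain integer loads, stores and cmpxchg. A rough user-level analogue of that reinterpretation, assuming the value and atomic widths match (illustrative only):

#include <cstdint>
#include <cstring>

uint32_t as_atomic_int_bits(float f) {
  // Same idea as the bitcast to an iN pointer: view the bytes of the value
  // as an integer of the atomic width.  When the source type's store size
  // differs from the atomic width, the code above copies through a temporary
  // first, which is what the memcpy stands in for here.
  uint32_t bits;
  static_assert(sizeof(bits) == sizeof(f), "widths must agree");
  std::memcpy(&bits, &f, sizeof(bits));
  return bits;
}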
1408
1409
RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
1410
                                             AggValueSlot resultSlot,
1411
                                             SourceLocation loc,
1412
157
                                             bool asValue) const {
1413
157
  if (LVal.isSimple()) {
1414
125
    if (EvaluationKind == TEK_Aggregate)
1415
11
      return resultSlot.asRValue();
1416
114
1417
114
    // Drill into the padding structure if we have one.
1418
114
    if (hasPadding())
1419
0
      addr = CGF.Builder.CreateStructGEP(addr, 0);
1420
114
1421
114
    // Otherwise, just convert the temporary to an r-value using the
1422
114
    // normal conversion routine.
1423
114
    return CGF.convertTempToRValue(addr, getValueType(), loc);
1424
114
  }
1425
32
  if (!asValue)
1426
8
    // Get RValue from temp memory as atomic for non-simple lvalues
1427
8
    return RValue::get(CGF.Builder.CreateLoad(addr));
1428
24
  if (LVal.isBitField())
1429
20
    return CGF.EmitLoadOfBitfieldLValue(
1430
20
        LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
1431
20
                             LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1432
4
  if (LVal.isVectorElt())
1433
2
    return CGF.EmitLoadOfLValue(
1434
2
        LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
1435
2
                              LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1436
2
  assert(LVal.isExtVectorElt());
1437
2
  return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
1438
2
      addr, LVal.getExtVectorElts(), LVal.getType(),
1439
2
      LVal.getBaseInfo(), TBAAAccessInfo()));
1440
2
}
1441
1442
RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
1443
                                             AggValueSlot ResultSlot,
1444
                                             SourceLocation Loc,
1445
324
                                             bool AsValue) const {
1446
324
  // Try not to in some easy cases.
1447
324
  assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
1448
324
  if (getEvaluationKind() == TEK_Scalar &&
1449
324
      (((!LVal.isBitField() ||
1450
314
         LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
1451
314
        !hasPadding()) ||
1452
314
       !AsValue)) {
1453
294
    auto *ValTy = AsValue
1454
294
                      ? CGF.ConvertTypeForMem(ValueTy)
1455
294
                      : getAtomicAddress().getType()->getPointerElementType();
1456
294
    if (ValTy->isIntegerTy()) {
1457
203
      assert(IntVal->getType() == ValTy && "Different integer types.");
1458
203
      return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
1459
203
    } else 
if (91
ValTy->isPointerTy()91
)
1460
0
      return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
1461
91
    else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
1462
63
      return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
1463
58
  }
1464
58
1465
58
  // Create a temporary.  This needs to be big enough to hold the
1466
58
  // atomic integer.
1467
58
  Address Temp = Address::invalid();
1468
58
  bool TempIsVolatile = false;
1469
58
  if (AsValue && 
getEvaluationKind() == TEK_Aggregate44
) {
1470
8
    assert(!ResultSlot.isIgnored());
1471
8
    Temp = ResultSlot.getAddress();
1472
8
    TempIsVolatile = ResultSlot.isVolatile();
1473
50
  } else {
1474
50
    Temp = CreateTempAlloca();
1475
50
  }
1476
58
1477
58
  // Slam the integer into the temporary.
1478
58
  Address CastTemp = emitCastToAtomicIntPointer(Temp);
1479
58
  CGF.Builder.CreateStore(IntVal, CastTemp)
1480
58
      ->setVolatile(TempIsVolatile);
1481
58
1482
58
  return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
1483
58
}
1484
1485
void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
1486
86
                                       llvm::AtomicOrdering AO, bool) {
1487
86
  // void __atomic_load(size_t size, void *mem, void *return, int order);
1488
86
  CallArgList Args;
1489
86
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1490
86
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
1491
86
           CGF.getContext().VoidPtrTy);
1492
86
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddForLoaded)),
1493
86
           CGF.getContext().VoidPtrTy);
1494
86
  Args.add(
1495
86
      RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
1496
86
      CGF.getContext().IntTy);
1497
86
  emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
1498
86
}
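The CallArgList above implements the generic runtime form quoted in the comment: the size first, then the object and result pointers, then the ordering. A small example of a load that typically ends up in this path because the type is too wide for inline atomics (illustrative; the exact size limit is target-dependent):

struct Big { long v[4]; };          // wider than the usual inline-atomic limit

Big load_big(Big *p) {
  Big result;
  // Generic builtin form; for an oversized object Clang lowers this to
  // __atomic_load(sizeof(Big), p, &result, order) as assembled above.
  __atomic_load(p, &result, __ATOMIC_SEQ_CST);
  return result;
}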
1499
1500
llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
1501
332
                                          bool IsVolatile) {
1502
332
  // Okay, we're doing this natively.
1503
332
  Address Addr = getAtomicAddressAsAtomicIntPointer();
1504
332
  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
1505
332
  Load->setAtomic(AO);
1506
332
1507
332
  // Other decoration.
1508
332
  if (IsVolatile)
1509
12
    Load->setVolatile(true);
1510
332
  CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
1511
332
  return Load;
1512
332
}
1513
1514
/// An LValue is a candidate for having its loads and stores be made atomic if
1515
/// we are operating under /volatile:ms *and* the LValue itself is volatile and
1516
/// performing such an operation can be performed without a libcall.
1517
955k
bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
1518
955k
  if (!CGM.getCodeGenOpts().MSVolatile) return false;
1519
45
  AtomicInfo AI(*this, LV);
1520
45
  bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
1521
45
  // An atomic is inline if we don't need to use a libcall.
1522
45
  bool AtomicIsInline = !AI.shouldUseLibcall();
1523
45
  // MSVC doesn't seem to do this for types wider than a pointer.
1524
45
  if (getContext().getTypeSize(LV.getType()) >
1525
45
      getContext().getTypeSize(getContext().getIntPtrType()))
1526
5
    return false;
1527
40
  return IsVolatile && AtomicIsInline;
1528
40
}
1529
1530
RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
1531
69
                                       AggValueSlot Slot) {
1532
69
  llvm::AtomicOrdering AO;
1533
69
  bool IsVolatile = LV.isVolatileQualified();
1534
69
  if (LV.getType()->isAtomicType()) {
1535
62
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
1536
62
  } else {
1537
7
    AO = llvm::AtomicOrdering::Acquire;
1538
7
    IsVolatile = true;
1539
7
  }
1540
69
  return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
1541
69
}
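Taken together, LValueIsSuitableForInlineAtomic and the wrapper above implement MSVC's /volatile:ms model (Clang's -fms-volatile): a volatile access of at most pointer size is given acquire semantics on load, and the matching store path later in this file uses release. A minimal illustration; the behaviour described in the comments assumes -fms-volatile, and with default settings these are plain accesses:

volatile int flag;

int read_flag() {
  return flag;    // under -fms-volatile: emitted as an atomic acquire load
}

void set_flag(int v) {
  flag = v;       // under -fms-volatile: emitted as an atomic release store
}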
1542
1543
RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
1544
                                  bool AsValue, llvm::AtomicOrdering AO,
1545
175
                                  bool IsVolatile) {
1546
175
  // Check whether we should use a library call.
1547
175
  if (shouldUseLibcall()) {
1548
42
    Address TempAddr = Address::invalid();
1549
42
    if (LVal.isSimple() && !ResultSlot.isIgnored()) {
1550
3
      assert(getEvaluationKind() == TEK_Aggregate);
1551
3
      TempAddr = ResultSlot.getAddress();
1552
3
    } else
1553
39
      TempAddr = CreateTempAlloca();
1554
42
1555
42
    EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);
1556
42
1557
42
    // Okay, turn that back into the original value or whole atomic (for
1558
42
    // non-simple lvalues) type.
1559
42
    return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
1560
42
  }
1561
133
1562
133
  // Okay, we're doing this natively.
1563
133
  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);
1564
133
1565
133
  // If we're ignoring an aggregate return, don't do anything.
1566
133
  if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
1567
0
    return RValue::getAggregate(Address::invalid(), false);
1568
133
1569
133
  // Okay, turn that back into the original value or atomic (for non-simple
1570
133
  // lvalues) type.
1571
133
  return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
1572
133
}
1573
1574
/// Emit a load from an l-value of atomic type.  Note that the r-value
1575
/// we produce is an r-value of the atomic *value* type.
1576
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
1577
                                       llvm::AtomicOrdering AO, bool IsVolatile,
1578
175
                                       AggValueSlot resultSlot) {
1579
175
  AtomicInfo Atomics(*this, src);
1580
175
  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
1581
175
                                IsVolatile);
1582
175
}
1583
1584
/// Copy an r-value into memory as part of storing to an atomic type.
1585
/// This needs to create a bit-pattern suitable for atomic operations.
1586
161
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
1587
161
  assert(LVal.isSimple());
1588
161
  // If we have an r-value, the rvalue should be of the atomic type,
1589
161
  // which means that the caller is responsible for having zeroed
1590
161
  // any padding.  Just do an aggregate copy of that type.
1591
161
  if (rvalue.isAggregate()) {
1592
0
    LValue Dest = CGF.MakeAddrLValue(getAtomicAddress(), getAtomicType());
1593
0
    LValue Src = CGF.MakeAddrLValue(rvalue.getAggregateAddress(),
1594
0
                                    getAtomicType());
1595
0
    bool IsVolatile = rvalue.isVolatileQualified() ||
1596
0
                      LVal.isVolatileQualified();
1597
0
    CGF.EmitAggregateCopy(Dest, Src, getAtomicType(),
1598
0
                          AggValueSlot::DoesNotOverlap, IsVolatile);
1599
0
    return;
1600
0
  }
1601
161
1602
161
  // Okay, otherwise we're copying stuff.
1603
161
1604
161
  // Zero out the buffer if necessary.
1605
161
  emitMemSetZeroIfNecessary();
1606
161
1607
161
  // Drill past the padding if present.
1608
161
  LValue TempLVal = projectValue();
1609
161
1610
161
  // Okay, store the rvalue in.
1611
161
  if (rvalue.isScalar()) {
1612
143
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
1613
143
  } else {
1614
18
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
1615
18
  }
1616
161
}
1617
1618
1619
/// Materialize an r-value into memory for the purposes of storing it
1620
/// to an atomic type.
1621
127
Address AtomicInfo::materializeRValue(RValue rvalue) const {
1622
127
  // Aggregate r-values are already in memory, and EmitAtomicStore
1623
127
  // requires them to be values of the atomic type.
1624
127
  if (rvalue.isAggregate())
1625
10
    return rvalue.getAggregateAddress();
1626
117
1627
117
  // Otherwise, make a temporary and materialize into it.
1628
117
  LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
1629
117
  AtomicInfo Atomics(CGF, TempLV);
1630
117
  Atomics.emitCopyIntoMemory(rvalue);
1631
117
  return TempLV.getAddress(CGF);
1632
117
}
1633
1634
143
llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
1635
143
  // If we've got a scalar value of the right size, try to avoid going
1636
143
  // through memory.
1637
143
  if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
1638
133
    llvm::Value *Value = RVal.getScalarVal();
1639
133
    if (isa<llvm::IntegerType>(Value->getType()))
1640
103
      return CGF.EmitToMemory(Value, ValueTy);
1641
30
    else {
1642
30
      llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
1643
30
          CGF.getLLVMContext(),
1644
30
          LVal.isSimple() ? getValueSizeInBits() : 
getAtomicSizeInBits()0
);
1645
30
      if (isa<llvm::PointerType>(Value->getType()))
1646
0
        return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
1647
30
      else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
1648
12
        return CGF.Builder.CreateBitCast(Value, InputIntTy);
1649
28
    }
1650
133
  }
1651
28
  // Otherwise, we need to go through memory.
1652
28
  // Put the r-value in memory.
1653
28
  Address Addr = materializeRValue(RVal);
1654
28
1655
28
  // Cast the temporary to the atomic int type and pull a value out.
1656
28
  Addr = emitCastToAtomicIntPointer(Addr);
1657
28
  return CGF.Builder.CreateLoad(Addr);
1658
28
}
1659
1660
std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
1661
    llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
1662
211
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
1663
211
  // Do the atomic store.
1664
211
  Address Addr = getAtomicAddressAsAtomicIntPointer();
1665
211
  auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
1666
211
                                               ExpectedVal, DesiredVal,
1667
211
                                               Success, Failure);
1668
211
  // Other decoration.
1669
211
  Inst->setVolatile(LVal.isVolatileQualified());
1670
211
  Inst->setWeak(IsWeak);
1671
211
1672
211
  // Okay, turn that back into the original value type.
1673
211
  auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
1674
211
  auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
1675
211
  return std::make_pair(PreviousVal, SuccessFailureVal);
1676
211
}
1677
1678
llvm::Value *
1679
AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
1680
                                             llvm::Value *DesiredAddr,
1681
                                             llvm::AtomicOrdering Success,
1682
61
                                             llvm::AtomicOrdering Failure) {
1683
61
  // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
1684
61
  // void *desired, int success, int failure);
1685
61
  CallArgList Args;
1686
61
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1687
61
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
1688
61
           CGF.getContext().VoidPtrTy);
1689
61
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
1690
61
           CGF.getContext().VoidPtrTy);
1691
61
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
1692
61
           CGF.getContext().VoidPtrTy);
1693
61
  Args.add(RValue::get(
1694
61
               llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
1695
61
           CGF.getContext().IntTy);
1696
61
  Args.add(RValue::get(
1697
61
               llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
1698
61
           CGF.getContext().IntTy);
1699
61
  auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
1700
61
                                              CGF.getContext().BoolTy, Args);
1701
61
1702
61
  return SuccessFailureRVal.getScalarVal();
1703
61
}
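The arguments above follow the generic runtime signature quoted in the comment. As with the load libcall, an object too wide for a native cmpxchg reaches this path; a sketch using the generic builtin (illustrative only):

struct Wide { long v[4]; };         // illustrative: no native cmpxchg this wide

bool cas_wide(Wide *obj, Wide *expected, Wide *desired) {
  // Lowered to __atomic_compare_exchange(size, obj, expected, desired,
  // success_order, failure_order); the boolean result reports whether the
  // exchange happened, and *expected receives the old value on failure.
  return __atomic_compare_exchange(obj, expected, desired, /*weak=*/false,
                                   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}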
1704
1705
std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
1706
    RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
1707
29
    llvm::AtomicOrdering Failure, bool IsWeak) {
1708
29
  if (isStrongerThan(Failure, Success))
1709
0
    // Don't assert on undefined behavior "failure argument shall be no stronger
1710
0
    // than the success argument".
1711
0
    Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
1712
29
1713
29
  // Check whether we should use a library call.
1714
29
  if (shouldUseLibcall()) {
1715
17
    // Produce a source address.
1716
17
    Address ExpectedAddr = materializeRValue(Expected);
1717
17
    Address DesiredAddr = materializeRValue(Desired);
1718
17
    auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1719
17
                                                 DesiredAddr.getPointer(),
1720
17
                                                 Success, Failure);
1721
17
    return std::make_pair(
1722
17
        convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
1723
17
                                  SourceLocation(), /*AsValue=*/false),
1724
17
        Res);
1725
17
  }
1726
12
1727
12
  // If we've got a scalar value of the right size, try to avoid going
1728
12
  // through memory.
1729
12
  auto *ExpectedVal = convertRValueToInt(Expected);
1730
12
  auto *DesiredVal = convertRValueToInt(Desired);
1731
12
  auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
1732
12
                                         Failure, IsWeak);
1733
12
  return std::make_pair(
1734
12
      ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
1735
12
                                SourceLocation(), /*AsValue=*/false),
1736
12
      Res.second);
1737
12
}
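The isStrongerThan check above silently weakens an over-strong failure ordering instead of asserting, since the comment notes that such a request is undefined at the source level. A well-formed request keeps the failure ordering no stronger than the success ordering, for example (illustrative std::atomic usage):

#include <atomic>

bool try_update(std::atomic<int> &a, int &expected, int desired) {
  return a.compare_exchange_strong(expected, desired,
                                   std::memory_order_acq_rel,   // success
                                   std::memory_order_acquire);  // failure
}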
1738
1739
static void
1740
EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
1741
                      const llvm::function_ref<RValue(RValue)> &UpdateOp,
1742
219
                      Address DesiredAddr) {
1743
219
  RValue UpRVal;
1744
219
  LValue AtomicLVal = Atomics.getAtomicLValue();
1745
219
  LValue DesiredLVal;
1746
219
  if (AtomicLVal.isSimple()) {
1747
171
    UpRVal = OldRVal;
1748
171
    DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
1749
171
  } else {
1750
48
    // Build new lvalue for temp address.
1751
48
    Address Ptr = Atomics.materializeRValue(OldRVal);
1752
48
    LValue UpdateLVal;
1753
48
    if (AtomicLVal.isBitField()) {
1754
40
      UpdateLVal =
1755
40
          LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
1756
40
                               AtomicLVal.getType(),
1757
40
                               AtomicLVal.getBaseInfo(),
1758
40
                               AtomicLVal.getTBAAInfo());
1759
40
      DesiredLVal =
1760
40
          LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1761
40
                               AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1762
40
                               AtomicLVal.getTBAAInfo());
1763
40
    } else if (AtomicLVal.isVectorElt()) {
1764
4
      UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
1765
4
                                         AtomicLVal.getType(),
1766
4
                                         AtomicLVal.getBaseInfo(),
1767
4
                                         AtomicLVal.getTBAAInfo());
1768
4
      DesiredLVal = LValue::MakeVectorElt(
1769
4
          DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
1770
4
          AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1771
4
    } else {
1772
4
      assert(AtomicLVal.isExtVectorElt());
1773
4
      UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
1774
4
                                            AtomicLVal.getType(),
1775
4
                                            AtomicLVal.getBaseInfo(),
1776
4
                                            AtomicLVal.getTBAAInfo());
1777
4
      DesiredLVal = LValue::MakeExtVectorElt(
1778
4
          DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1779
4
          AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1780
4
    }
1781
48
    UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
1782
48
  }
1783
219
  // Store new value in the corresponding memory area.
1784
219
  RValue NewRVal = UpdateOp(UpRVal);
1785
219
  if (NewRVal.isScalar()) {
1786
197
    CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
1787
197
  } else {
1788
22
    assert(NewRVal.isComplex());
1789
22
    CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
1790
22
                           /*isInit=*/false);
1791
22
  }
1792
219
}
1793
1794
void AtomicInfo::EmitAtomicUpdateLibcall(
1795
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1796
40
    bool IsVolatile) {
1797
40
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1798
40
1799
40
  Address ExpectedAddr = CreateTempAlloca();
1800
40
1801
40
  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
1802
40
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1803
40
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1804
40
  CGF.EmitBlock(ContBB);
1805
40
  Address DesiredAddr = CreateTempAlloca();
1806
40
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1807
40
      requiresMemSetZero(getAtomicAddress().getElementType())) {
1808
8
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1809
8
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
1810
8
  }
1811
40
  auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
1812
40
                                           AggValueSlot::ignored(),
1813
40
                                           SourceLocation(), /*AsValue=*/false);
1814
40
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
1815
40
  auto *Res =
1816
40
      EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1817
40
                                       DesiredAddr.getPointer(),
1818
40
                                       AO, Failure);
1819
40
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1820
40
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1821
40
}
1822
1823
void AtomicInfo::EmitAtomicUpdateOp(
1824
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1825
179
    bool IsVolatile) {
1826
179
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1827
179
1828
179
  // Do the atomic load.
1829
179
  auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
1830
179
  // For non-simple lvalues perform compare-and-swap procedure.
1831
179
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1832
179
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1833
179
  auto *CurBB = CGF.Builder.GetInsertBlock();
1834
179
  CGF.EmitBlock(ContBB);
1835
179
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1836
179
                                             /*NumReservedValues=*/2);
1837
179
  PHI->addIncoming(OldVal, CurBB);
1838
179
  Address NewAtomicAddr = CreateTempAlloca();
1839
179
  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1840
179
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1841
179
      requiresMemSetZero(getAtomicAddress().getElementType())) {
1842
48
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1843
48
  }
1844
179
  auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
1845
179
                                           SourceLocation(), /*AsValue=*/false);
1846
179
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
1847
179
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1848
179
  // Try to write new value using cmpxchg operation.
1849
179
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1850
179
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1851
179
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1852
179
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1853
179
}
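EmitAtomicUpdateOp materializes a classic compare-and-swap retry loop: load once, feed the old value through UpdateOp, and retry the cmpxchg until no other thread has raced in, with the PHI carrying the freshly observed value back around. A simplified user-level equivalent (a sketch only; the orderings and the bit-field/padding handling above are elided):

#include <atomic>

template <class T, class Fn>
T atomic_update(std::atomic<T> &obj, Fn update, std::memory_order order) {
  T old = obj.load(std::memory_order_relaxed);          // the initial load
  T desired;
  do {
    desired = update(old);                              // UpdateOp(old)
    // On failure, 'old' is refreshed with the current value, mirroring the
    // PHI that feeds the cmpxchg result back into the next iteration.
  } while (!obj.compare_exchange_weak(old, desired, order,
                                      std::memory_order_relaxed));
  return desired;
}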
1854
1855
static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
1856
24
                                  RValue UpdateRVal, Address DesiredAddr) {
1857
24
  LValue AtomicLVal = Atomics.getAtomicLValue();
1858
24
  LValue DesiredLVal;
1859
24
  // Build new lvalue for temp address.
1860
24
  if (AtomicLVal.isBitField()) {
1861
20
    DesiredLVal =
1862
20
        LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1863
20
                             AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1864
20
                             AtomicLVal.getTBAAInfo());
1865
20
  } else if (AtomicLVal.isVectorElt()) {
1866
2
    DesiredLVal =
1867
2
        LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
1868
2
                              AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1869
2
                              AtomicLVal.getTBAAInfo());
1870
2
  } else {
1871
2
    assert(AtomicLVal.isExtVectorElt());
1872
2
    DesiredLVal = LValue::MakeExtVectorElt(
1873
2
        DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1874
2
        AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1875
2
  }
1876
24
  // Store new value in the corresponding memory area.
1877
24
  assert(UpdateRVal.isScalar());
1878
24
  CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
1879
24
}
1880
1881
void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
1882
4
                                         RValue UpdateRVal, bool IsVolatile) {
1883
4
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1884
4
1885
4
  Address ExpectedAddr = CreateTempAlloca();
1886
4
1887
4
  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
1888
4
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1889
4
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1890
4
  CGF.EmitBlock(ContBB);
1891
4
  Address DesiredAddr = CreateTempAlloca();
1892
4
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1893
4
      requiresMemSetZero(getAtomicAddress().getElementType())) {
1894
4
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1895
4
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
1896
4
  }
1897
4
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
1898
4
  auto *Res =
1899
4
      EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1900
4
                                       DesiredAddr.getPointer(),
1901
4
                                       AO, Failure);
1902
4
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1903
4
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1904
4
}
1905
1906
void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
1907
20
                                    bool IsVolatile) {
1908
20
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1909
20
1910
20
  // Do the atomic load.
1911
20
  auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
1912
20
  // For non-simple lvalues perform compare-and-swap procedure.
1913
20
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1914
20
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1915
20
  auto *CurBB = CGF.Builder.GetInsertBlock();
1916
20
  CGF.EmitBlock(ContBB);
1917
20
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1918
20
                                             /*NumReservedValues=*/2);
1919
20
  PHI->addIncoming(OldVal, CurBB);
1920
20
  Address NewAtomicAddr = CreateTempAlloca();
1921
20
  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1922
20
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1923
20
      requiresMemSetZero(getAtomicAddress().getElementType())) {
1924
20
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1925
20
  }
1926
20
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
1927
20
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1928
20
  // Try to write new value using cmpxchg operation.
1929
20
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1930
20
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1931
20
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1932
20
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1933
20
}
1934
1935
void AtomicInfo::EmitAtomicUpdate(
1936
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1937
219
    bool IsVolatile) {
1938
219
  if (shouldUseLibcall()) {
1939
40
    EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
1940
179
  } else {
1941
179
    EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
1942
179
  }
1943
219
}
1944
1945
void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
1946
24
                                  bool IsVolatile) {
1947
24
  if (shouldUseLibcall()) {
1948
4
    EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
1949
20
  } else {
1950
20
    EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
1951
20
  }
1952
24
}
1953
1954
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
1955
81
                                      bool isInit) {
1956
81
  bool IsVolatile = lvalue.isVolatileQualified();
1957
81
  llvm::AtomicOrdering AO;
1958
81
  if (lvalue.getType()->isAtomicType()) {
1959
74
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
1960
74
  } else {
1961
7
    AO = llvm::AtomicOrdering::Release;
1962
7
    IsVolatile = true;
1963
7
  }
1964
81
  return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
1965
81
}
1966
1967
/// Emit a store to an l-value of atomic type.
1968
///
1969
/// Note that the r-value is expected to be an r-value *of the atomic
1970
/// type*; this means that for aggregate r-values, it should include
1971
/// storage for any padding that was necessary.
1972
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
1973
                                      llvm::AtomicOrdering AO, bool IsVolatile,
1974
193
                                      bool isInit) {
1975
193
  // If this is an aggregate r-value, it should agree in type except
1976
193
  // maybe for address-space qualification.
1977
193
  assert(!rvalue.isAggregate() ||
1978
193
         rvalue.getAggregateAddress().getElementType() ==
1979
193
             dest.getAddress(*this).getElementType());
1980
193
1981
193
  AtomicInfo atomics(*this, dest);
1982
193
  LValue LVal = atomics.getAtomicLValue();
1983
193
1984
193
  // If this is an initialization, just put the value there normally.
1985
193
  if (LVal.isSimple()) {
1986
169
    if (isInit) {
1987
33
      atomics.emitCopyIntoMemory(rvalue);
1988
33
      return;
1989
33
    }
1990
136
1991
136
    // Check whether we should use a library call.
1992
136
    if (atomics.shouldUseLibcall()) {
1993
17
      // Produce a source address.
1994
17
      Address srcAddr = atomics.materializeRValue(rvalue);
1995
17
1996
17
      // void __atomic_store(size_t size, void *mem, void *val, int order)
1997
17
      CallArgList args;
1998
17
      args.add(RValue::get(atomics.getAtomicSizeValue()),
1999
17
               getContext().getSizeType());
2000
17
      args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())),
2001
17
               getContext().VoidPtrTy);
2002
17
      args.add(RValue::get(EmitCastToVoidPtr(srcAddr.getPointer())),
2003
17
               getContext().VoidPtrTy);
2004
17
      args.add(
2005
17
          RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
2006
17
          getContext().IntTy);
2007
17
      emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
2008
17
      return;
2009
17
    }
2010
119
2011
119
    // Okay, we're doing this natively.
2012
119
    llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
2013
119
2014
119
    // Do the atomic store.
2015
119
    Address addr =
2016
119
        atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
2017
119
    intValue = Builder.CreateIntCast(
2018
119
        intValue, addr.getElementType(), /*isSigned=*/false);
2019
119
    llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
2020
119
2021
119
    if (AO == llvm::AtomicOrdering::Acquire)
2022
0
      AO = llvm::AtomicOrdering::Monotonic;
2023
119
    else if (AO == llvm::AtomicOrdering::AcquireRelease)
2024
0
      AO = llvm::AtomicOrdering::Release;
2025
119
    // Initializations don't need to be atomic.
2026
119
    if (!isInit)
2027
119
      store->setAtomic(AO);
2028
119
2029
119
    // Other decoration.
2030
119
    if (IsVolatile)
2031
10
      store->setVolatile(true);
2032
119
    CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
2033
119
    return;
2034
119
  }
2035
24
2036
24
  // Emit simple atomic update operation.
2037
24
  atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
2038
24
}
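Two details above are worth noting: oversized or non-lock-free stores go through the generic __atomic_store(size, mem, val, order) runtime call quoted in the comment, and a store can only carry monotonic, release or seq_cst ordering, so acquire and acq_rel requests are demoted before being attached to the StoreInst. A small illustration of the libcall-bound case (illustrative only):

struct Wide { long v[4]; };         // illustrative: beyond the inline-atomic limit

void store_wide(Wide *dst, Wide val) {
  // Generic builtin; Clang lowers this to
  // __atomic_store(sizeof(Wide), dst, &val, order) as assembled above.
  __atomic_store(dst, &val, __ATOMIC_SEQ_CST);
}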
2039
2040
/// Emit a compare-and-exchange op for atomic type.
2041
///
2042
std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
2043
    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
2044
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
2045
29
    AggValueSlot Slot) {
2046
29
  // If this is an aggregate r-value, it should agree in type except
2047
29
  // maybe for address-space qualification.
2048
29
  assert(!Expected.isAggregate() ||
2049
29
         Expected.getAggregateAddress().getElementType() ==
2050
29
             Obj.getAddress(*this).getElementType());
2051
29
  assert(!Desired.isAggregate() ||
2052
29
         Desired.getAggregateAddress().getElementType() ==
2053
29
             Obj.getAddress(*this).getElementType());
2054
29
  AtomicInfo Atomics(*this, Obj);
2055
29
2056
29
  return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
2057
29
                                           IsWeak);
2058
29
}
2059
2060
void CodeGenFunction::EmitAtomicUpdate(
2061
    LValue LVal, llvm::AtomicOrdering AO,
2062
219
    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
2063
219
  AtomicInfo Atomics(*this, LVal);
2064
219
  Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
2065
219
}
2066
2067
20
void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
2068
20
  AtomicInfo atomics(*this, dest);
2069
20
2070
20
  switch (atomics.getEvaluationKind()) {
2071
9
  case TEK_Scalar: {
2072
9
    llvm::Value *value = EmitScalarExpr(init);
2073
9
    atomics.emitCopyIntoMemory(RValue::get(value));
2074
9
    return;
2075
0
  }
2076
0
2077
2
  case TEK_Complex: {
2078
2
    ComplexPairTy value = EmitComplexExpr(init);
2079
2
    atomics.emitCopyIntoMemory(RValue::getComplex(value));
2080
2
    return;
2081
0
  }
2082
0
2083
9
  case TEK_Aggregate: {
2084
9
    // Fix up the destination if the initializer isn't an expression
2085
9
    // of atomic type.
2086
9
    bool Zeroed = false;
2087
9
    if (!init->getType()->isAtomicType()) {
2088
5
      Zeroed = atomics.emitMemSetZeroIfNecessary();
2089
5
      dest = atomics.projectValue();
2090
5
    }
2091
9
2092
9
    // Evaluate the expression directly into the destination.
2093
9
    AggValueSlot slot = AggValueSlot::forLValue(
2094
9
        dest, *this, AggValueSlot::IsNotDestructed,
2095
9
        AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
2096
9
        AggValueSlot::DoesNotOverlap,
2097
9
        Zeroed ? AggValueSlot::IsZeroed : AggValueSlot::IsNotZeroed);
2098
9
2099
9
    EmitAggExpr(init, slot);
2100
9
    return;
2101
0
  }
2102
0
  }
2103
0
  llvm_unreachable("bad evaluation kind");
2104
0
}