Clang Project

clang_source_code/lib/CodeGen/CGAtomic.cpp
1//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the code for emitting atomic operations.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCall.h"
14#include "CGRecordLayout.h"
15#include "CodeGenFunction.h"
16#include "CodeGenModule.h"
17#include "TargetInfo.h"
18#include "clang/AST/ASTContext.h"
19#include "clang/CodeGen/CGFunctionInfo.h"
20#include "clang/Frontend/FrontendDiagnostic.h"
21#include "llvm/ADT/DenseMap.h"
22#include "llvm/IR/DataLayout.h"
23#include "llvm/IR/Intrinsics.h"
24#include "llvm/IR/Operator.h"
25
26using namespace clang;
27using namespace CodeGen;
28
29namespace {
30  class AtomicInfo {
31    CodeGenFunction &CGF;
32    QualType AtomicTy;
33    QualType ValueTy;
34    uint64_t AtomicSizeInBits;
35    uint64_t ValueSizeInBits;
36    CharUnits AtomicAlign;
37    CharUnits ValueAlign;
38    TypeEvaluationKind EvaluationKind;
39    bool UseLibcall;
40    LValue LVal;
41    CGBitFieldInfo BFI;
42  public:
43    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
44        : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
45          EvaluationKind(TEK_Scalar), UseLibcall(true) {
46      assert(!lvalue.isGlobalReg());
47      ASTContext &C = CGF.getContext();
48      if (lvalue.isSimple()) {
49        AtomicTy = lvalue.getType();
50        if (auto *ATy = AtomicTy->getAs<AtomicType>())
51          ValueTy = ATy->getValueType();
52        else
53          ValueTy = AtomicTy;
54        EvaluationKind = CGF.getEvaluationKind(ValueTy);
55
56        uint64_t ValueAlignInBits;
57        uint64_t AtomicAlignInBits;
58        TypeInfo ValueTI = C.getTypeInfo(ValueTy);
59        ValueSizeInBits = ValueTI.Width;
60        ValueAlignInBits = ValueTI.Align;
61
62        TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
63        AtomicSizeInBits = AtomicTI.Width;
64        AtomicAlignInBits = AtomicTI.Align;
65
66        assert(ValueSizeInBits <= AtomicSizeInBits);
67        assert(ValueAlignInBits <= AtomicAlignInBits);
68
69        AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
70        ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
71        if (lvalue.getAlignment().isZero())
72          lvalue.setAlignment(AtomicAlign);
73
74        LVal = lvalue;
75      } else if (lvalue.isBitField()) {
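        // An atomic access to a bit-field operates on the smallest suitably
        // aligned storage unit that covers it: widen the access to
        // AtomicSizeInBits and rebase the bit-field info onto that unit.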
76        ValueTy = lvalue.getType();
77        ValueSizeInBits = C.getTypeSize(ValueTy);
78        auto &OrigBFI = lvalue.getBitFieldInfo();
79        auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
80        AtomicSizeInBits = C.toBits(
81            C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
82                .alignTo(lvalue.getAlignment()));
83        auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldPointer());
84        auto OffsetInChars =
85            (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
86            lvalue.getAlignment();
87        VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
88            VoidPtrAddr, OffsetInChars.getQuantity());
89        auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
90            VoidPtrAddr,
91            CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
92            "atomic_bitfield_base");
93        BFI = OrigBFI;
94        BFI.Offset = Offset;
95        BFI.StorageSize = AtomicSizeInBits;
96        BFI.StorageOffset += OffsetInChars;
97        LVal = LValue::MakeBitfield(Address(Addr, lvalue.getAlignment()),
98                                    BFI, lvalue.getType(), lvalue.getBaseInfo(),
99                                    lvalue.getTBAAInfo());
100        AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
101        if (AtomicTy.isNull()) {
102          llvm::APInt Size(
103              /*numBits=*/32,
104              C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
105          AtomicTy = C.getConstantArrayType(C.CharTy, Size, ArrayType::Normal,
106                                            /*IndexTypeQuals=*/0);
107        }
108        AtomicAlign = ValueAlign = lvalue.getAlignment();
109      } else if (lvalue.isVectorElt()) {
110        ValueTy = lvalue.getType()->getAs<VectorType>()->getElementType();
111        ValueSizeInBits = C.getTypeSize(ValueTy);
112        AtomicTy = lvalue.getType();
113        AtomicSizeInBits = C.getTypeSize(AtomicTy);
114        AtomicAlign = ValueAlign = lvalue.getAlignment();
115        LVal = lvalue;
116      } else {
117        assert(lvalue.isExtVectorElt());
118        ValueTy = lvalue.getType();
119        ValueSizeInBits = C.getTypeSize(ValueTy);
120        AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
121            lvalue.getType(), lvalue.getExtVectorAddress()
122                                  .getElementType()->getVectorNumElements());
123        AtomicSizeInBits = C.getTypeSize(AtomicTy);
124        AtomicAlign = ValueAlign = lvalue.getAlignment();
125        LVal = lvalue;
126      }
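      // If the target has no lock-free atomic support for this width at this
      // alignment, every operation on this value goes through the atomic
      // library instead (see shouldUseLibcall()).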
127      UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
128          AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
129    }
130
131    QualType getAtomicType() const { return AtomicTy; }
132    QualType getValueType() const { return ValueTy; }
133    CharUnits getAtomicAlignment() const { return AtomicAlign; }
134    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
135    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
136    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
137    bool shouldUseLibcall() const { return UseLibcall; }
138    const LValue &getAtomicLValue() const { return LVal; }
139    llvm::Value *getAtomicPointer() const {
140      if (LVal.isSimple())
141        return LVal.getPointer();
142      else if (LVal.isBitField())
143        return LVal.getBitFieldPointer();
144      else if (LVal.isVectorElt())
145        return LVal.getVectorPointer();
146      assert(LVal.isExtVectorElt());
147      return LVal.getExtVectorPointer();
148    }
149    Address getAtomicAddress() const {
150      return Address(getAtomicPointer(), getAtomicAlignment());
151    }
152
153    Address getAtomicAddressAsAtomicIntPointer() const {
154      return emitCastToAtomicIntPointer(getAtomicAddress());
155    }
156
157    /// Is the atomic size larger than the underlying value type?
158    ///
159    /// Note that the absence of padding does not mean that atomic
160    /// objects are completely interchangeable with non-atomic
161    /// objects: we might have promoted the alignment of a type
162    /// without making it bigger.
163    bool hasPadding() const {
164      return (ValueSizeInBits != AtomicSizeInBits);
165    }
166
167    bool emitMemSetZeroIfNecessary() const;
168
169    llvm::Value *getAtomicSizeValue() const {
170      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
171      return CGF.CGM.getSize(size);
172    }
173
174    /// Cast the given pointer to an integer pointer suitable for atomic
175    /// operations.
176    Address emitCastToAtomicIntPointer(Address Addr) const;
177
178    /// If Addr is compatible with the iN that will be used for an atomic
179    /// operation, bitcast it. Otherwise, create a temporary that is suitable
180    /// and copy the value across.
181    Address convertToAtomicIntPointer(Address Addr) const;
182
183    /// Turn an atomic-layout object into an r-value.
184    RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
185                                     SourceLocation loc, bool AsValue) const;
186
187    /// Converts an r-value to an integer value.
188    llvm::Value *convertRValueToInt(RValue RVal) const;
189
190    RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
191                                     AggValueSlot ResultSlot,
192                                     SourceLocation Loc, bool AsValue) const;
193
194    /// Copy an atomic r-value into atomic-layout memory.
195    void emitCopyIntoMemory(RValue rvalue) const;
196
197    /// Project an l-value down to the value field.
198    LValue projectValue() const {
199      assert(LVal.isSimple());
200      Address addr = getAtomicAddress();
201      if (hasPadding())
202        addr = CGF.Builder.CreateStructGEP(addr, 0);
203
204      return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
205                              LVal.getBaseInfo(), LVal.getTBAAInfo());
206    }
207
208    /// Emits atomic load.
209    /// \returns Loaded value.
210    RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
211                          bool AsValue, llvm::AtomicOrdering AO,
212                          bool IsVolatile);
213
214    /// Emits atomic compare-and-exchange sequence.
215    /// \param Expected Expected value.
216    /// \param Desired Desired value.
217    /// \param Success Atomic ordering for success operation.
218    /// \param Failure Atomic ordering for failed operation.
219    /// \param IsWeak true if atomic operation is weak, false otherwise.
220    /// \returns Pair of values: previous value from storage (value type) and
221    /// boolean flag (i1 type) with true if success and false otherwise.
222    std::pair<RValue, llvm::Value *>
223    EmitAtomicCompareExchange(RValue Expected, RValue Desired,
224                              llvm::AtomicOrdering Success =
225                                  llvm::AtomicOrdering::SequentiallyConsistent,
226                              llvm::AtomicOrdering Failure =
227                                  llvm::AtomicOrdering::SequentiallyConsistent,
228                              bool IsWeak = false);
229
230    /// Emits atomic update.
231    /// \param AO Atomic ordering.
232    /// \param UpdateOp Update operation for the current lvalue.
233    void EmitAtomicUpdate(llvm::AtomicOrdering AO,
234                          const llvm::function_ref<RValue(RValue)> &UpdateOp,
235                          bool IsVolatile);
236    /// Emits atomic update.
237    /// \param AO Atomic ordering.
238    void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
239                          bool IsVolatile);
240
241    /// Materialize an atomic r-value in atomic-layout memory.
242    Address materializeRValue(RValue rvalue) const;
243
244    /// Creates temp alloca for intermediate operations on atomic value.
245    Address CreateTempAlloca() const;
246  private:
247    bool requiresMemSetZero(llvm::Type *type) const;
248
249
250    /// Emits atomic load as a libcall.
251    void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
252                               llvm::AtomicOrdering AO, bool IsVolatile);
253    /// Emits atomic load as LLVM instruction.
254    llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
255    /// Emits atomic compare-and-exchange op as a libcall.
256    llvm::Value *EmitAtomicCompareExchangeLibcall(
257        llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
258        llvm::AtomicOrdering Success =
259            llvm::AtomicOrdering::SequentiallyConsistent,
260        llvm::AtomicOrdering Failure =
261            llvm::AtomicOrdering::SequentiallyConsistent);
262    /// Emits atomic compare-and-exchange op as LLVM instruction.
263    std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
264        llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
265        llvm::AtomicOrdering Success =
266            llvm::AtomicOrdering::SequentiallyConsistent,
267        llvm::AtomicOrdering Failure =
268            llvm::AtomicOrdering::SequentiallyConsistent,
269        bool IsWeak = false);
270    /// Emit atomic update as libcalls.
271    void
272    EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
273                            const llvm::function_ref<RValue(RValue)> &UpdateOp,
274                            bool IsVolatile);
275    /// Emit atomic update as LLVM instructions.
276    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
277                            const llvm::function_ref<RValue(RValue)> &UpdateOp,
278                            bool IsVolatile);
279    /// Emit atomic update as libcalls.
280    void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
281                                 bool IsVolatile);
282    /// Emit atomic update as LLVM instructions.
283    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
284                            bool IsVolatile);
285  };
286}
287
288Address AtomicInfo::CreateTempAlloca() const {
289  Address TempAlloca = CGF.CreateMemTemp(
290      (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
291                                                                : AtomicTy,
292      getAtomicAlignment(),
293      "atomic-temp");
294  // Cast to pointer to value type for bitfields.
295  if (LVal.isBitField())
296    return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
297        TempAlloca, getAtomicAddress().getType());
298  return TempAlloca;
299}
300
301static RValue emitAtomicLibcall(CodeGenFunction &CGF,
302                                StringRef fnName,
303                                QualType resultType,
304                                CallArgList &args) {
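  // Build a call to the named __atomic_* runtime routine: arrange the call
  // info for the given result type and arguments, get-or-create the runtime
  // function declaration, and emit a direct call to it.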
305  const CGFunctionInfo &fnInfo =
306    CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
307  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
308  llvm::FunctionCallee fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
309  auto callee = CGCallee::forDirect(fn);
310  return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
311}
312
313/// Does a store of the given IR type modify the full expected width?
314static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
315                           uint64_t expectedSize) {
316  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
317}
318
319/// Does the atomic type require memsetting to zero before initialization?
320///
321/// The IR type is provided as a way of making certain queries faster.
322bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
323  // If the atomic type has size padding, we definitely need a memset.
324  if (hasPadding()) return true;
325
326  // Otherwise, do some simple heuristics to try to avoid it:
327  switch (getEvaluationKind()) {
328  // For scalars and complexes, check whether the store size of the
329  // type uses the full size.
330  case TEK_Scalar:
331    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
332  case TEK_Complex:
333    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
334                           AtomicSizeInBits / 2);
335
336  // Padding in structs has an undefined bit pattern.  User beware.
337  case TEK_Aggregate:
338    return false;
339  }
340  llvm_unreachable("bad evaluation kind");
341}
342
343bool AtomicInfo::emitMemSetZeroIfNecessary() const {
344  assert(LVal.isSimple());
345  llvm::Value *addr = LVal.getPointer();
346  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
347    return false;
348
349  CGF.Builder.CreateMemSet(
350      addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
351      CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
352      LVal.getAlignment().getQuantity());
353  return true;
354}
355
356static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
357                              Address Dest, Address Ptr,
358                              Address Val1, Address Val2,
359                              uint64_t Size,
360                              llvm::AtomicOrdering SuccessOrder,
361                              llvm::AtomicOrdering FailureOrder,
362                              llvm::SyncScope::ID Scope) {
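  // Lower the compare-exchange: load the expected and desired values from
  // their temporaries, emit the cmpxchg, and on failure store the observed
  // value back into the "expected" slot so the caller can see it.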
363  // Note that cmpxchg doesn't support weak cmpxchg, at least at the moment.
364  llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
365  llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);
366
367  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
368      Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder,
369      Scope);
370  Pair->setVolatile(E->isVolatile());
371  Pair->setWeak(IsWeak);
372
373  // Cmp holds the result of the compare-exchange operation: true on success,
374  // false on failure.
375  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
376  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);
377
378  // This basic block is used to hold the store instruction if the operation
379  // failed.
380  llvm::BasicBlock *StoreExpectedBB =
381      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
382
383  // This basic block is the exit point of the operation, we should end up
384  // here regardless of whether or not the operation succeeded.
385  llvm::BasicBlock *ContinueBB =
386      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
387
388  // Update Expected if Expected isn't equal to Old, otherwise branch to the
389  // exit point.
390  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);
391
392  CGF.Builder.SetInsertPoint(StoreExpectedBB);
393  // Update the memory at Expected with Old's value.
394  CGF.Builder.CreateStore(Old, Val1);
395  // Finally, branch to the exit point.
396  CGF.Builder.CreateBr(ContinueBB);
397
398  CGF.Builder.SetInsertPoint(ContinueBB);
399  // Update the memory at Dest with Cmp's value.
400  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
401}
402
403/// Given an ordering required on success, emit all possible cmpxchg
404/// instructions to cope with the provided (but possibly only dynamically known)
405/// FailureOrder.
406static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
407                                        bool IsWeak, Address Dest, Address Ptr,
408                                        Address Val1, Address Val2,
409                                        llvm::Value *FailureOrderVal,
410                                        uint64_t Size,
411                                        llvm::AtomicOrdering SuccessOrder,
412                                        llvm::SyncScope::ID Scope) {
413  llvm::AtomicOrdering FailureOrder;
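  // When the failure ordering is a compile-time constant, map the C ABI
  // value directly to an LLVM ordering. A failure ordering cannot include
  // release semantics, so release and acq_rel are lowered to monotonic.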
414  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
415    auto FOS = FO->getSExtValue();
416    if (!llvm::isValidAtomicOrderingCABI(FOS))
417      FailureOrder = llvm::AtomicOrdering::Monotonic;
418    else
419      switch ((llvm::AtomicOrderingCABI)FOS) {
420      case llvm::AtomicOrderingCABI::relaxed:
421      case llvm::AtomicOrderingCABI::release:
422      case llvm::AtomicOrderingCABI::acq_rel:
423        FailureOrder = llvm::AtomicOrdering::Monotonic;
424        break;
425      case llvm::AtomicOrderingCABI::consume:
426      case llvm::AtomicOrderingCABI::acquire:
427        FailureOrder = llvm::AtomicOrdering::Acquire;
428        break;
429      case llvm::AtomicOrderingCABI::seq_cst:
430        FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
431        break;
432      }
433    if (isStrongerThan(FailureOrder, SuccessOrder)) {
434      // Don't assert on undefined behavior "failure argument shall be no
435      // stronger than the success argument".
436      FailureOrder =
437          llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
438    }
439    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
440                      FailureOrder, Scope);
441    return;
442  }
443
444  // Create all the relevant BB's
445  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
446                   *SeqCstBB = nullptr;
447  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
448  if (SuccessOrder != llvm::AtomicOrdering::Monotonic &&
449      SuccessOrder != llvm::AtomicOrdering::Release)
450    AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
451  if (SuccessOrder == llvm::AtomicOrdering::SequentiallyConsistent)
452    SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
453
454  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);
455
456  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
457
458  // Emit all the different atomics
459
460  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
461  // doesn't matter unless someone is crazy enough to use something that
462  // doesn't fold to a constant for the ordering.
463  CGF.Builder.SetInsertPoint(MonotonicBB);
464  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
465                    Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
466  CGF.Builder.CreateBr(ContBB);
467
468  if (AcquireBB) {
469    CGF.Builder.SetInsertPoint(AcquireBB);
470    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
471                      Size, SuccessOrder, llvm::AtomicOrdering::Acquire, Scope);
472    CGF.Builder.CreateBr(ContBB);
473    SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
474                AcquireBB);
475    SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
476                AcquireBB);
477  }
478  if (SeqCstBB) {
479    CGF.Builder.SetInsertPoint(SeqCstBB);
480    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
481                      llvm::AtomicOrdering::SequentiallyConsistent, Scope);
482    CGF.Builder.CreateBr(ContBB);
483    SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
484                SeqCstBB);
485  }
486
487  CGF.Builder.SetInsertPoint(ContBB);
488}
489
490static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
491                         Address Ptr, Address Val1, Address Val2,
492                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
493                         uint64_t Size, llvm::AtomicOrdering Order,
494                         llvm::SyncScope::ID Scope) {
495  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
496  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
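  // For the __atomic_*_fetch forms, PostOp records the operation to re-apply
  // to the atomicrmw result (which is the old value) so that the newly
  // written value can be returned; see the code after this switch.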
497
498  switch (E->getOp()) {
499  case AtomicExpr::AO__c11_atomic_init:
500  case AtomicExpr::AO__opencl_atomic_init:
501    llvm_unreachable("Already handled!");
502
503  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
504  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
505    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
506                                FailureOrder, Size, Order, Scope);
507    return;
508  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
509  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
510    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
511                                FailureOrder, Size, Order, Scope);
512    return;
513  case AtomicExpr::AO__atomic_compare_exchange:
514  case AtomicExpr::AO__atomic_compare_exchange_n: {
515    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
516      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
517                                  Val1, Val2, FailureOrder, Size, Order, Scope);
518    } else {
519      // Create all the relevant BB's
520      llvm::BasicBlock *StrongBB =
521          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
522      llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmxchg.weak", CGF.CurFn);
523      llvm::BasicBlock *ContBB =
524          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
525
526      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
527      SI->addCase(CGF.Builder.getInt1(false), StrongBB);
528
529      CGF.Builder.SetInsertPoint(StrongBB);
530      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
531                                  FailureOrder, Size, Order, Scope);
532      CGF.Builder.CreateBr(ContBB);
533
534      CGF.Builder.SetInsertPoint(WeakBB);
535      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
536                                  FailureOrder, Size, Order, Scope);
537      CGF.Builder.CreateBr(ContBB);
538
539      CGF.Builder.SetInsertPoint(ContBB);
540    }
541    return;
542  }
543  case AtomicExpr::AO__c11_atomic_load:
544  case AtomicExpr::AO__opencl_atomic_load:
545  case AtomicExpr::AO__atomic_load_n:
546  case AtomicExpr::AO__atomic_load: {
547    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
548    Load->setAtomic(Order, Scope);
549    Load->setVolatile(E->isVolatile());
550    CGF.Builder.CreateStore(Load, Dest);
551    return;
552  }
553
554  case AtomicExpr::AO__c11_atomic_store:
555  case AtomicExpr::AO__opencl_atomic_store:
556  case AtomicExpr::AO__atomic_store:
557  case AtomicExpr::AO__atomic_store_n: {
558    llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
559    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
560    Store->setAtomic(Order, Scope);
561    Store->setVolatile(E->isVolatile());
562    return;
563  }
564
565  case AtomicExpr::AO__c11_atomic_exchange:
566  case AtomicExpr::AO__opencl_atomic_exchange:
567  case AtomicExpr::AO__atomic_exchange_n:
568  case AtomicExpr::AO__atomic_exchange:
569    Op = llvm::AtomicRMWInst::Xchg;
570    break;
571
572  case AtomicExpr::AO__atomic_add_fetch:
573    PostOp = llvm::Instruction::Add;
574    LLVM_FALLTHROUGH;
575  case AtomicExpr::AO__c11_atomic_fetch_add:
576  case AtomicExpr::AO__opencl_atomic_fetch_add:
577  case AtomicExpr::AO__atomic_fetch_add:
578    Op = llvm::AtomicRMWInst::Add;
579    break;
580
581  case AtomicExpr::AO__atomic_sub_fetch:
582    PostOp = llvm::Instruction::Sub;
583    LLVM_FALLTHROUGH;
584  case AtomicExpr::AO__c11_atomic_fetch_sub:
585  case AtomicExpr::AO__opencl_atomic_fetch_sub:
586  case AtomicExpr::AO__atomic_fetch_sub:
587    Op = llvm::AtomicRMWInst::Sub;
588    break;
589
590  case AtomicExpr::AO__opencl_atomic_fetch_min:
591  case AtomicExpr::AO__atomic_fetch_min:
592    Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min
593                                                  : llvm::AtomicRMWInst::UMin;
594    break;
595
596  case AtomicExpr::AO__opencl_atomic_fetch_max:
597  case AtomicExpr::AO__atomic_fetch_max:
598    Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max
599                                                  : llvm::AtomicRMWInst::UMax;
600    break;
601
602  case AtomicExpr::AO__atomic_and_fetch:
603    PostOp = llvm::Instruction::And;
604    LLVM_FALLTHROUGH;
605  case AtomicExpr::AO__c11_atomic_fetch_and:
606  case AtomicExpr::AO__opencl_atomic_fetch_and:
607  case AtomicExpr::AO__atomic_fetch_and:
608    Op = llvm::AtomicRMWInst::And;
609    break;
610
611  case AtomicExpr::AO__atomic_or_fetch:
612    PostOp = llvm::Instruction::Or;
613    LLVM_FALLTHROUGH;
614  case AtomicExpr::AO__c11_atomic_fetch_or:
615  case AtomicExpr::AO__opencl_atomic_fetch_or:
616  case AtomicExpr::AO__atomic_fetch_or:
617    Op = llvm::AtomicRMWInst::Or;
618    break;
619
620  case AtomicExpr::AO__atomic_xor_fetch:
621    PostOp = llvm::Instruction::Xor;
622    LLVM_FALLTHROUGH;
623  case AtomicExpr::AO__c11_atomic_fetch_xor:
624  case AtomicExpr::AO__opencl_atomic_fetch_xor:
625  case AtomicExpr::AO__atomic_fetch_xor:
626    Op = llvm::AtomicRMWInst::Xor;
627    break;
628
629  case AtomicExpr::AO__atomic_nand_fetch:
630    PostOp = llvm::Instruction::And; // the NOT is special cased below
631    LLVM_FALLTHROUGH;
632  case AtomicExpr::AO__atomic_fetch_nand:
633    Op = llvm::AtomicRMWInst::Nand;
634    break;
635  }
636
637  llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
638  llvm::AtomicRMWInst *RMWI =
639      CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order, Scope);
640  RMWI->setVolatile(E->isVolatile());
641
642  // For __atomic_*_fetch operations, perform the operation again to
643  // determine the value which was written.
644  llvm::Value *Result = RMWI;
645  if (PostOp)
646    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
647  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
648    Result = CGF.Builder.CreateNot(Result);
649  CGF.Builder.CreateStore(Result, Dest);
650}
651
652// This function emits any expression (scalar, complex, or aggregate)
653// into a temporary alloca.
654static Address
655EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
656  Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
657  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
658                       /*Init*/ true);
659  return DeclPtr;
660}
661
662static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
663                         Address Ptr, Address Val1, Address Val2,
664                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
665                         uint64_t Size, llvm::AtomicOrdering Order,
666                         llvm::Value *Scope) {
667  auto ScopeModel = Expr->getScopeModel();
668
669  // LLVM atomic instructions always have synch scope. If clang atomic
670  // expression has no scope operand, use default LLVM synch scope.
671  if (!ScopeModel) {
672    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
673                 Order, CGF.CGM.getLLVMContext().getOrInsertSyncScopeID(""));
674    return;
675  }
676
677  // Handle constant scope.
678  if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
679    auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
680        CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),
681        Order, CGF.CGM.getLLVMContext());
682    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
683                 Order, SCID);
684    return;
685  }
686
687  // Handle non-constant scope.
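  // The scope is only known at run time: emit one block per supported scope
  // value and switch on the operand, using the model's fallback scope as the
  // default for unexpected values.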
688  auto &Builder = CGF.Builder;
689  auto Scopes = ScopeModel->getRuntimeValues();
690  llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
691  for (auto S : Scopes)
692    BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);
693
694  llvm::BasicBlock *ContBB =
695      CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);
696
697  auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
698  // If unsupported synch scope is encountered at run time, assume a fallback
699  // synch scope value.
700  auto FallBack = ScopeModel->getFallBackValue();
701  llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
702  for (auto S : Scopes) {
703    auto *B = BB[S];
704    if (S != FallBack)
705      SI->addCase(Builder.getInt32(S), B);
706
707    Builder.SetInsertPoint(B);
708    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
709                 Order,
710                 CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
711                                                         ScopeModel->map(S),
712                                                         Order,
713                                                         CGF.getLLVMContext()));
714    Builder.CreateBr(ContBB);
715  }
716
717  Builder.SetInsertPoint(ContBB);
718}
719
720static void
721AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
722                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
723                  SourceLocation Loc, CharUnits SizeInChars) {
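  // Optimized (size-suffixed) libcalls take the value argument by value,
  // reinterpreted as an integer of the same width; the generic libcalls take
  // it by address as a void*.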
724  if (UseOptimizedLibcall) {
725    // Load value and pass it to the function directly.
726    CharUnits Align = CGF.getContext().getTypeAlignInChars(ValTy);
727    int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
728    ValTy =
729        CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
730    llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
731                                                SizeInBits)->getPointerTo();
732    Address Ptr = Address(CGF.Builder.CreateBitCast(Val, IPtrTy), Align);
733    Val = CGF.EmitLoadOfScalar(Ptr, false,
734                               CGF.getContext().getPointerType(ValTy),
735                               Loc);
736    // Coerce the value into an appropriately sized integer type.
737    Args.add(RValue::get(Val), ValTy);
738  } else {
739    // Non-optimized functions always take a reference.
740    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
741                         CGF.getContext().VoidPtrTy);
742  }
743}
744
745RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
746  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
747  QualType MemTy = AtomicTy;
748  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
749    MemTy = AT->getValueType();
750  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;
751
752  Address Val1 = Address::invalid();
753  Address Val2 = Address::invalid();
754  Address Dest = Address::invalid();
755  Address Ptr = EmitPointerWithAlignment(E->getPtr());
756
757  if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
758      E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
759    LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
760    EmitAtomicInit(E->getVal1(), lvalue);
761    return RValue::get(nullptr);
762  }
763
764  CharUnits sizeChars, alignChars;
765  std::tie(sizeChars, alignChars) = getContext().getTypeInfoInChars(AtomicTy);
766  uint64_t Size = sizeChars.getQuantity();
767  unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
768
769  bool Oversized = getContext().toBits(sizeChars) > MaxInlineWidthInBits;
770  bool Misaligned = (Ptr.getAlignment() % sizeChars) != 0;
771  bool UseLibcall = Misaligned | Oversized;
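  // A library call is required whenever the object is wider than the largest
  // atomic operation the target can inline, or its alignment is not a
  // multiple of its size.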
772
773  if (UseLibcall) {
774    CGM.getDiags().Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
775        << !Oversized;
776  }
777
778  llvm::Value *Order = EmitScalarExpr(E->getOrder());
779  llvm::Value *Scope =
780      E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;
781
782  switch (E->getOp()) {
783  case AtomicExpr::AO__c11_atomic_init:
784  case AtomicExpr::AO__opencl_atomic_init:
785    llvm_unreachable("Already handled above with EmitAtomicInit!");
786
787  case AtomicExpr::AO__c11_atomic_load:
788  case AtomicExpr::AO__opencl_atomic_load:
789  case AtomicExpr::AO__atomic_load_n:
790    break;
791
792  case AtomicExpr::AO__atomic_load:
793    Dest = EmitPointerWithAlignment(E->getVal1());
794    break;
795
796  case AtomicExpr::AO__atomic_store:
797    Val1 = EmitPointerWithAlignment(E->getVal1());
798    break;
799
800  case AtomicExpr::AO__atomic_exchange:
801    Val1 = EmitPointerWithAlignment(E->getVal1());
802    Dest = EmitPointerWithAlignment(E->getVal2());
803    break;
804
805  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
806  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
807  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
808  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
809  case AtomicExpr::AO__atomic_compare_exchange_n:
810  case AtomicExpr::AO__atomic_compare_exchange:
811    Val1 = EmitPointerWithAlignment(E->getVal1());
812    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
813      Val2 = EmitPointerWithAlignment(E->getVal2());
814    else
815      Val2 = EmitValToTemp(*this, E->getVal2());
816    OrderFail = EmitScalarExpr(E->getOrderFail());
817    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
818        E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
819      IsWeak = EmitScalarExpr(E->getWeak());
820    break;
821
822  case AtomicExpr::AO__c11_atomic_fetch_add:
823  case AtomicExpr::AO__c11_atomic_fetch_sub:
824  case AtomicExpr::AO__opencl_atomic_fetch_add:
825  case AtomicExpr::AO__opencl_atomic_fetch_sub:
826    if (MemTy->isPointerType()) {
827      // For pointer arithmetic, we're required to do a bit of math:
828      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
829      // ... but only for the C11 builtins. The GNU builtins expect the
830      // user to multiply by sizeof(T).
831      QualType Val1Ty = E->getVal1()->getType();
832      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
833      CharUnits PointeeIncAmt =
834          getContext().getTypeSizeInChars(MemTy->getPointeeType());
835      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
836      auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
837      Val1 = Temp;
838      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
839      break;
840    }
841      LLVM_FALLTHROUGH;
842  case AtomicExpr::AO__atomic_fetch_add:
843  case AtomicExpr::AO__atomic_fetch_sub:
844  case AtomicExpr::AO__atomic_add_fetch:
845  case AtomicExpr::AO__atomic_sub_fetch:
846  case AtomicExpr::AO__c11_atomic_store:
847  case AtomicExpr::AO__c11_atomic_exchange:
848  case AtomicExpr::AO__opencl_atomic_store:
849  case AtomicExpr::AO__opencl_atomic_exchange:
850  case AtomicExpr::AO__atomic_store_n:
851  case AtomicExpr::AO__atomic_exchange_n:
852  case AtomicExpr::AO__c11_atomic_fetch_and:
853  case AtomicExpr::AO__c11_atomic_fetch_or:
854  case AtomicExpr::AO__c11_atomic_fetch_xor:
855  case AtomicExpr::AO__opencl_atomic_fetch_and:
856  case AtomicExpr::AO__opencl_atomic_fetch_or:
857  case AtomicExpr::AO__opencl_atomic_fetch_xor:
858  case AtomicExpr::AO__opencl_atomic_fetch_min:
859  case AtomicExpr::AO__opencl_atomic_fetch_max:
860  case AtomicExpr::AO__atomic_fetch_and:
861  case AtomicExpr::AO__atomic_fetch_or:
862  case AtomicExpr::AO__atomic_fetch_xor:
863  case AtomicExpr::AO__atomic_fetch_nand:
864  case AtomicExpr::AO__atomic_and_fetch:
865  case AtomicExpr::AO__atomic_or_fetch:
866  case AtomicExpr::AO__atomic_xor_fetch:
867  case AtomicExpr::AO__atomic_nand_fetch:
868  case AtomicExpr::AO__atomic_fetch_min:
869  case AtomicExpr::AO__atomic_fetch_max:
870    Val1 = EmitValToTemp(*this, E->getVal1());
871    break;
872  }
873
874  QualType RValTy = E->getType().getUnqualifiedType();
875
876  // The inlined atomics only function on iN types, where N is a power of 2. We
877  // need to make sure (via temporaries if necessary) that all incoming values
878  // are compatible.
879  LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
880  AtomicInfo Atomics(*this, AtomicVal);
881
882  Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
883  if (Val1.isValid()) Val1 = Atomics.convertToAtomicIntPointer(Val1);
884  if (Val2.isValid()) Val2 = Atomics.convertToAtomicIntPointer(Val2);
885  if (Dest.isValid())
886    Dest = Atomics.emitCastToAtomicIntPointer(Dest);
887  else if (E->isCmpXChg())
888    Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
889  else if (!RValTy->isVoidType())
890    Dest = Atomics.emitCastToAtomicIntPointer(Atomics.CreateTempAlloca());
891
892  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
893  if (UseLibcall) {
894    bool UseOptimizedLibcall = false;
895    switch (E->getOp()) {
896    case AtomicExpr::AO__c11_atomic_init:
897    case AtomicExpr::AO__opencl_atomic_init:
898      llvm_unreachable("Already handled above with EmitAtomicInit!");
899
900    case AtomicExpr::AO__c11_atomic_fetch_add:
901    case AtomicExpr::AO__opencl_atomic_fetch_add:
902    case AtomicExpr::AO__atomic_fetch_add:
903    case AtomicExpr::AO__c11_atomic_fetch_and:
904    case AtomicExpr::AO__opencl_atomic_fetch_and:
905    case AtomicExpr::AO__atomic_fetch_and:
906    case AtomicExpr::AO__c11_atomic_fetch_or:
907    case AtomicExpr::AO__opencl_atomic_fetch_or:
908    case AtomicExpr::AO__atomic_fetch_or:
909    case AtomicExpr::AO__atomic_fetch_nand:
910    case AtomicExpr::AO__c11_atomic_fetch_sub:
911    case AtomicExpr::AO__opencl_atomic_fetch_sub:
912    case AtomicExpr::AO__atomic_fetch_sub:
913    case AtomicExpr::AO__c11_atomic_fetch_xor:
914    case AtomicExpr::AO__opencl_atomic_fetch_xor:
915    case AtomicExpr::AO__opencl_atomic_fetch_min:
916    case AtomicExpr::AO__opencl_atomic_fetch_max:
917    case AtomicExpr::AO__atomic_fetch_xor:
918    case AtomicExpr::AO__atomic_add_fetch:
919    case AtomicExpr::AO__atomic_and_fetch:
920    case AtomicExpr::AO__atomic_nand_fetch:
921    case AtomicExpr::AO__atomic_or_fetch:
922    case AtomicExpr::AO__atomic_sub_fetch:
923    case AtomicExpr::AO__atomic_xor_fetch:
924    case AtomicExpr::AO__atomic_fetch_min:
925    case AtomicExpr::AO__atomic_fetch_max:
926      // For these, only library calls for certain sizes exist.
927      UseOptimizedLibcall = true;
928      break;
929
930    case AtomicExpr::AO__atomic_load:
931    case AtomicExpr::AO__atomic_store:
932    case AtomicExpr::AO__atomic_exchange:
933    case AtomicExpr::AO__atomic_compare_exchange:
934      // Use the generic version if we don't know that the operand will be
935      // suitably aligned for the optimized version.
936      if (Misaligned)
937        break;
938      LLVM_FALLTHROUGH;
939    case AtomicExpr::AO__c11_atomic_load:
940    case AtomicExpr::AO__c11_atomic_store:
941    case AtomicExpr::AO__c11_atomic_exchange:
942    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
943    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
944    case AtomicExpr::AO__opencl_atomic_load:
945    case AtomicExpr::AO__opencl_atomic_store:
946    case AtomicExpr::AO__opencl_atomic_exchange:
947    case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
948    case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
949    case AtomicExpr::AO__atomic_load_n:
950    case AtomicExpr::AO__atomic_store_n:
951    case AtomicExpr::AO__atomic_exchange_n:
952    case AtomicExpr::AO__atomic_compare_exchange_n:
953      // Only use optimized library calls for sizes for which they exist.
954      // FIXME: Size == 16 optimized library functions exist too.
955      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
956        UseOptimizedLibcall = true;
957      break;
958    }
959
960    CallArgList Args;
961    if (!UseOptimizedLibcall) {
962      // For non-optimized library calls, the size is the first parameter
963      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
964               getContext().getSizeType());
965    }
966    // Atomic address is the first or second parameter
967    // The OpenCL atomic library functions only accept pointer arguments to
968    // generic address space.
969    auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
970      if (!E->isOpenCL())
971        return V;
972      auto AS = PT->getAs<PointerType>()->getPointeeType().getAddressSpace();
973      if (AS == LangAS::opencl_generic)
974        return V;
975      auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
976      auto T = V->getType();
977      auto *DestType = T->getPointerElementType()->getPointerTo(DestAS);
978
979      return getTargetHooks().performAddrSpaceCast(
980          *this, V, AS, LangAS::opencl_generic, DestType, false);
981    };
982
983    Args.add(RValue::get(CastToGenericAddrSpace(
984                 EmitCastToVoidPtr(Ptr.getPointer()), E->getPtr()->getType())),
985             getContext().VoidPtrTy);
986
987    std::string LibCallName;
988    QualType LoweredMemTy =
989      MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
990    QualType RetTy;
991    bool HaveRetTy = false;
992    llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
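    // As above, PostOp records the arithmetic to apply to the libcall's
    // return value for the *_fetch forms; it is only used on the optimized
    // libcall path.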
993    switch (E->getOp()) {
994    case AtomicExpr::AO__c11_atomic_init:
995    case AtomicExpr::AO__opencl_atomic_init:
996      llvm_unreachable("Already handled!");
997
998    // There is only one libcall for compare and exchange, because there is no
999    // optimisation benefit possible from a libcall version of a weak compare
1000    // and exchange.
1001    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
1002    //                                void *desired, int success, int failure)
1003    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
1004    //                                  int success, int failure)
1005    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
1006    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
1007    case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
1008    case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
1009    case AtomicExpr::AO__atomic_compare_exchange:
1010    case AtomicExpr::AO__atomic_compare_exchange_n:
1011      LibCallName = "__atomic_compare_exchange";
1012      RetTy = getContext().BoolTy;
1013      HaveRetTy = true;
1014      Args.add(
1015          RValue::get(CastToGenericAddrSpace(
1016              EmitCastToVoidPtr(Val1.getPointer()), E->getVal1()->getType())),
1017          getContext().VoidPtrTy);
1018      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
1019                        MemTy, E->getExprLoc(), sizeChars);
1020      Args.add(RValue::get(Order), getContext().IntTy);
1021      Order = OrderFail;
1022      break;
1023    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
1024    //                        int order)
1025    // T __atomic_exchange_N(T *mem, T val, int order)
1026    case AtomicExpr::AO__c11_atomic_exchange:
1027    case AtomicExpr::AO__opencl_atomic_exchange:
1028    case AtomicExpr::AO__atomic_exchange_n:
1029    case AtomicExpr::AO__atomic_exchange:
1030      LibCallName = "__atomic_exchange";
1031      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1032                        MemTy, E->getExprLoc(), sizeChars);
1033      break;
1034    // void __atomic_store(size_t size, void *mem, void *val, int order)
1035    // void __atomic_store_N(T *mem, T val, int order)
1036    case AtomicExpr::AO__c11_atomic_store:
1037    case AtomicExpr::AO__opencl_atomic_store:
1038    case AtomicExpr::AO__atomic_store:
1039    case AtomicExpr::AO__atomic_store_n:
1040      LibCallName = "__atomic_store";
1041      RetTy = getContext().VoidTy;
1042      HaveRetTy = true;
1043      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1044                        MemTy, E->getExprLoc(), sizeChars);
1045      break;
1046    // void __atomic_load(size_t size, void *mem, void *return, int order)
1047    // T __atomic_load_N(T *mem, int order)
1048    case AtomicExpr::AO__c11_atomic_load:
1049    case AtomicExpr::AO__opencl_atomic_load:
1050    case AtomicExpr::AO__atomic_load:
1051    case AtomicExpr::AO__atomic_load_n:
1052      LibCallName = "__atomic_load";
1053      break;
1054    // T __atomic_add_fetch_N(T *mem, T val, int order)
1055    // T __atomic_fetch_add_N(T *mem, T val, int order)
1056    case AtomicExpr::AO__atomic_add_fetch:
1057      PostOp = llvm::Instruction::Add;
1058      LLVM_FALLTHROUGH;
1059    case AtomicExpr::AO__c11_atomic_fetch_add:
1060    case AtomicExpr::AO__opencl_atomic_fetch_add:
1061    case AtomicExpr::AO__atomic_fetch_add:
1062      LibCallName = "__atomic_fetch_add";
1063      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1064                        LoweredMemTy, E->getExprLoc(), sizeChars);
1065      break;
1066    // T __atomic_and_fetch_N(T *mem, T val, int order)
1067    // T __atomic_fetch_and_N(T *mem, T val, int order)
1068    case AtomicExpr::AO__atomic_and_fetch:
1069      PostOp = llvm::Instruction::And;
1070      LLVM_FALLTHROUGH;
1071    case AtomicExpr::AO__c11_atomic_fetch_and:
1072    case AtomicExpr::AO__opencl_atomic_fetch_and:
1073    case AtomicExpr::AO__atomic_fetch_and:
1074      LibCallName = "__atomic_fetch_and";
1075      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1076                        MemTy, E->getExprLoc(), sizeChars);
1077      break;
1078    // T __atomic_or_fetch_N(T *mem, T val, int order)
1079    // T __atomic_fetch_or_N(T *mem, T val, int order)
1080    case AtomicExpr::AO__atomic_or_fetch:
1081      PostOp = llvm::Instruction::Or;
1082      LLVM_FALLTHROUGH;
1083    case AtomicExpr::AO__c11_atomic_fetch_or:
1084    case AtomicExpr::AO__opencl_atomic_fetch_or:
1085    case AtomicExpr::AO__atomic_fetch_or:
1086      LibCallName = "__atomic_fetch_or";
1087      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1088                        MemTy, E->getExprLoc(), sizeChars);
1089      break;
1090    // T __atomic_sub_fetch_N(T *mem, T val, int order)
1091    // T __atomic_fetch_sub_N(T *mem, T val, int order)
1092    case AtomicExpr::AO__atomic_sub_fetch:
1093      PostOp = llvm::Instruction::Sub;
1094      LLVM_FALLTHROUGH;
1095    case AtomicExpr::AO__c11_atomic_fetch_sub:
1096    case AtomicExpr::AO__opencl_atomic_fetch_sub:
1097    case AtomicExpr::AO__atomic_fetch_sub:
1098      LibCallName = "__atomic_fetch_sub";
1099      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1100                        LoweredMemTy, E->getExprLoc(), sizeChars);
1101      break;
1102    // T __atomic_xor_fetch_N(T *mem, T val, int order)
1103    // T __atomic_fetch_xor_N(T *mem, T val, int order)
1104    case AtomicExpr::AO__atomic_xor_fetch:
1105      PostOp = llvm::Instruction::Xor;
1106      LLVM_FALLTHROUGH;
1107    case AtomicExpr::AO__c11_atomic_fetch_xor:
1108    case AtomicExpr::AO__opencl_atomic_fetch_xor:
1109    case AtomicExpr::AO__atomic_fetch_xor:
1110      LibCallName = "__atomic_fetch_xor";
1111      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1112                        MemTy, E->getExprLoc(), sizeChars);
1113      break;
1114    case AtomicExpr::AO__atomic_fetch_min:
1115    case AtomicExpr::AO__opencl_atomic_fetch_min:
1116      LibCallName = E->getValueType()->isSignedIntegerType()
1117                        ? "__atomic_fetch_min"
1118                        : "__atomic_fetch_umin";
1119      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1120                        LoweredMemTy, E->getExprLoc(), sizeChars);
1121      break;
1122    case AtomicExpr::AO__atomic_fetch_max:
1123    case AtomicExpr::AO__opencl_atomic_fetch_max:
1124      LibCallName = E->getValueType()->isSignedIntegerType()
1125                        ? "__atomic_fetch_max"
1126                        : "__atomic_fetch_umax";
1127      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1128                        LoweredMemTy, E->getExprLoc(), sizeChars);
1129      break;
1130    // T __atomic_nand_fetch_N(T *mem, T val, int order)
1131    // T __atomic_fetch_nand_N(T *mem, T val, int order)
1132    case AtomicExpr::AO__atomic_nand_fetch:
1133      PostOp = llvm::Instruction::And; // the NOT is special cased below
1134      LLVM_FALLTHROUGH;
1135    case AtomicExpr::AO__atomic_fetch_nand:
1136      LibCallName = "__atomic_fetch_nand";
1137      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1138                        MemTy, E->getExprLoc(), sizeChars);
1139      break;
1140    }
1141
1142    if (E->isOpenCL()) {
1143      LibCallName = std::string("__opencl") +
1144          StringRef(LibCallName).drop_front(1).str();
1145
1146    }
1147    // Optimized functions have the size in their name.
1148    if (UseOptimizedLibcall)
1149      LibCallName += "_" + llvm::utostr(Size);
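    // For example, a 4-byte fetch-add lowers to __atomic_fetch_add_4.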
1150    // By default, assume we return a value of the atomic type.
1151    if (!HaveRetTy) {
1152      if (UseOptimizedLibcall) {
1153        // Value is returned directly.
1154        // The function returns an appropriately sized integer type.
1155        RetTy = getContext().getIntTypeForBitwidth(
1156            getContext().toBits(sizeChars), /*Signed=*/false);
1157      } else {
1158        // Value is returned through parameter before the order.
1159        RetTy = getContext().VoidTy;
1160        Args.add(RValue::get(EmitCastToVoidPtr(Dest.getPointer())),
1161                 getContext().VoidPtrTy);
1162      }
1163    }
1164    // order is always the last parameter
1165    Args.add(RValue::get(Order),
1166             getContext().IntTy);
1167    if (E->isOpenCL())
1168      Args.add(RValue::get(Scope), getContext().IntTy);
1169
1170    // PostOp is only needed for the atomic_*_fetch operations, and
1171    // thus is only needed for and implemented in the
1172    // UseOptimizedLibcall codepath.
1173    assert(UseOptimizedLibcall || !PostOp);
1174
1175    RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
1176    // The value is returned directly from the libcall.
1177    if (E->isCmpXChg())
1178      return Res;
1179
1180    // The value is returned directly for optimized libcalls but the expr
1181    // provided an out-param.
1182    if (UseOptimizedLibcall && Res.getScalarVal()) {
1183      llvm::Value *ResVal = Res.getScalarVal();
1184      if (PostOp) {
1185        llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
1186        ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
1187      }
1188      if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
1189        ResVal = Builder.CreateNot(ResVal);
1190
1191      Builder.CreateStore(
1192          ResVal,
1193          Builder.CreateBitCast(Dest, ResVal->getType()->getPointerTo()));
1194    }
1195
1196    if (RValTy->isVoidType())
1197      return RValue::get(nullptr);
1198
1199    return convertTempToRValue(
1200        Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
1201        RValTy, E->getExprLoc());
1202  }
1203
1204  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
1205                 E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
1206                 E->getOp() == AtomicExpr::AO__atomic_store ||
1207                 E->getOp() == AtomicExpr::AO__atomic_store_n;
1208  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
1209                E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
1210                E->getOp() == AtomicExpr::AO__atomic_load ||
1211                E->getOp() == AtomicExpr::AO__atomic_load_n;
1212
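  // Only acquire-compatible orderings make sense for loads and only
  // release-compatible orderings for stores; IsLoad/IsStore are used below
  // to skip the orderings that cannot apply.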
1213  if (isa<llvm::ConstantInt>(Order)) {
1214    auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
1215    // We should not ever get to a case where the ordering isn't a valid C ABI
1216    // value, but it's hard to enforce that in general.
1217    if (llvm::isValidAtomicOrderingCABI(ord))
1218      switch ((llvm::AtomicOrderingCABI)ord) {
1219      case llvm::AtomicOrderingCABI::relaxed:
1220        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1221                     llvm::AtomicOrdering::Monotonic, Scope);
1222        break;
1223      case llvm::AtomicOrderingCABI::consume:
1224      case llvm::AtomicOrderingCABI::acquire:
1225        if (IsStore)
1226          break; // Avoid crashing on code with undefined behavior
1227        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1228                     llvm::AtomicOrdering::Acquire, Scope);
1229        break;
1230      case llvm::AtomicOrderingCABI::release:
1231        if (IsLoad)
1232          break; // Avoid crashing on code with undefined behavior
1233        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1234                     llvm::AtomicOrdering::Release, Scope);
1235        break;
1236      case llvm::AtomicOrderingCABI::acq_rel:
1237        if (IsLoad || IsStore)
1238          break; // Avoid crashing on code with undefined behavior
1239        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1240                     llvm::AtomicOrdering::AcquireRelease, Scope);
1241        break;
1242      case llvm::AtomicOrderingCABI::seq_cst:
1243        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1244                     llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1245        break;
1246      }
1247    if (RValTy->isVoidType())
1248      return RValue::get(nullptr);
1249
1250    return convertTempToRValue(
1251        Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
1252                                        Dest.getAddressSpace())),
1253        RValTy, E->getExprLoc());
1254  }
1255
1256  // Long case, when Order isn't obviously constant.
1257
1258  // Create all the relevant BB's
1259  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
1260                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
1261                   *SeqCstBB = nullptr;
1262  MonotonicBB = createBasicBlock("monotonic", CurFn);
1263  if (!IsStore)
1264    AcquireBB = createBasicBlock("acquire", CurFn);
1265  if (!IsLoad)
1266    ReleaseBB = createBasicBlock("release", CurFn);
1267  if (!IsLoad && !IsStore)
1268    AcqRelBB = createBasicBlock("acqrel", CurFn);
1269  SeqCstBB = createBasicBlock("seqcst", CurFn);
1270  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
1271
1272  // Create the switch for the split
1273  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
1274  // doesn't matter unless someone is crazy enough to use something that
1275  // doesn't fold to a constant for the ordering.
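  // Illustrative sketch (not part of the emitted code): this slow path is only
  // reached for something like
  //   __c11_atomic_load(p, order);   // 'order' is a runtime value, not a constant
  // in which case the switch created below dispatches on the C ABI ordering
  // value to the per-ordering blocks created above.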
1276  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
1277  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
1278
1279  // Emit all the different atomics
1280  Builder.SetInsertPoint(MonotonicBB);
1281  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1282               llvm::AtomicOrdering::Monotonic, Scope);
1283  Builder.CreateBr(ContBB);
1284  if (!IsStore) {
1285    Builder.SetInsertPoint(AcquireBB);
1286    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1287                 llvm::AtomicOrdering::Acquire, Scope);
1288    Builder.CreateBr(ContBB);
1289    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
1290                AcquireBB);
1291    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
1292                AcquireBB);
1293  }
1294  if (!IsLoad) {
1295    Builder.SetInsertPoint(ReleaseBB);
1296    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1297                 llvm::AtomicOrdering::Release, Scope);
1298    Builder.CreateBr(ContBB);
1299    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
1300                ReleaseBB);
1301  }
1302  if (!IsLoad && !IsStore) {
1303    Builder.SetInsertPoint(AcqRelBB);
1304    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1305                 llvm::AtomicOrdering::AcquireRelease, Scope);
1306    Builder.CreateBr(ContBB);
1307    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
1308                AcqRelBB);
1309  }
1310  Builder.SetInsertPoint(SeqCstBB);
1311  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1312               llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1313  Builder.CreateBr(ContBB);
1314  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
1315              SeqCstBB);
1316
1317  // Cleanup and return
1318  Builder.SetInsertPoint(ContBB);
1319  if (RValTy->isVoidType())
1320    return RValue::get(nullptr);
1321
1322  assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
1323  return convertTempToRValue(
1324      Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
1325                                      Dest.getAddressSpace())),
1326      RValTy, E->getExprLoc());
1327}
1328
1329Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
1330  unsigned addrspace =
1331    cast<llvm::PointerType>(addr.getPointer()->getType())->getAddressSpace();
1332  llvm::IntegerType *ty =
1333    llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
1334  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
1335}
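// Illustrative only: for a 32-bit float atomic the cast above produces an i32*
// view of the same storage (assumed IR shape: "bitcast float* %obj to i32*"),
// so the value can be moved with integer atomic instructions.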
1336
1337Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
1338  llvm::Type *Ty = Addr.getElementType();
1339  uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
1340  if (SourceSizeInBits != AtomicSizeInBits) {
1341    Address Tmp = CreateTempAlloca();
1342    CGF.Builder.CreateMemCpy(Tmp, Addr,
1343                             std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
1344    Addr = Tmp;
1345  }
1346
1347  return emitCastToAtomicIntPointer(Addr);
1348}
1349
1350RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
1351                                             AggValueSlot resultSlot,
1352                                             SourceLocation loc,
1353                                             bool asValue) const {
1354  if (LVal.isSimple()) {
1355    if (EvaluationKind == TEK_Aggregate)
1356      return resultSlot.asRValue();
1357
1358    // Drill into the padding structure if we have one.
1359    if (hasPadding())
1360      addr = CGF.Builder.CreateStructGEP(addr, 0);
1361
1362    // Otherwise, just convert the temporary to an r-value using the
1363    // normal conversion routine.
1364    return CGF.convertTempToRValue(addr, getValueType(), loc);
1365  }
1366  if (!asValue)
1367    // Get RValue from temp memory as atomic for non-simple lvalues
1368    return RValue::get(CGF.Builder.CreateLoad(addr));
1369  if (LVal.isBitField())
1370    return CGF.EmitLoadOfBitfieldLValue(
1371        LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
1372                             LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1373  if (LVal.isVectorElt())
1374    return CGF.EmitLoadOfLValue(
1375        LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
1376                              LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1377  assert(LVal.isExtVectorElt());
1378  return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
1379      addr, LVal.getExtVectorElts(), LVal.getType(),
1380      LVal.getBaseInfo(), TBAAAccessInfo()));
1381}
1382
1383RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
1384                                             AggValueSlot ResultSlot,
1385                                             SourceLocation Loc,
1386                                             bool AsValue) const {
1387  // Try to avoid materializing a temporary in some easy cases.
1388  assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
1389  if (getEvaluationKind() == TEK_Scalar &&
1390      (((!LVal.isBitField() ||
1391         LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
1392        !hasPadding()) ||
1393       !AsValue)) {
1394    auto *ValTy = AsValue
1395                      ? CGF.ConvertTypeForMem(ValueTy)
1396                      : getAtomicAddress().getType()->getPointerElementType();
1397    if (ValTy->isIntegerTy()) {
1398      assert(IntVal->getType() == ValTy && "Different integer types.");
1399      return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
1400    } else if (ValTy->isPointerTy())
1401      return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
1402    else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
1403      return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
1404  }
1405
1406  // Create a temporary.  This needs to be big enough to hold the
1407  // atomic integer.
1408  Address Temp = Address::invalid();
1409  bool TempIsVolatile = false;
1410  if (AsValue && getEvaluationKind() == TEK_Aggregate) {
1411    assert(!ResultSlot.isIgnored());
1412    Temp = ResultSlot.getAddress();
1413    TempIsVolatile = ResultSlot.isVolatile();
1414  } else {
1415    Temp = CreateTempAlloca();
1416  }
1417
1418  // Slam the integer into the temporary.
1419  Address CastTemp = emitCastToAtomicIntPointer(Temp);
1420  CGF.Builder.CreateStore(IntVal, CastTemp)
1421      ->setVolatile(TempIsVolatile);
1422
1423  return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
1424}
1425
1426void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
1427                                       llvm::AtomicOrdering AO, bool) {
1428  // void __atomic_load(size_t size, void *mem, void *return, int order);
1429  CallArgList Args;
1430  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1431  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
1432           CGF.getContext().VoidPtrTy);
1433  Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddForLoaded)),
1434           CGF.getContext().VoidPtrTy);
1435  Args.add(
1436      RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
1437      CGF.getContext().IntTy);
1438  emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
1439}
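// Illustrative only: for a 4-byte object loaded seq_cst, the libcall emitted
// above has roughly this shape (assumed IR):
//   call void @__atomic_load(i64 4, i8* %obj, i8* %ret, i32 5)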
1440
1441llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
1442                                          bool IsVolatile) {
1443  // Okay, we're doing this natively.
1444  Address Addr = getAtomicAddressAsAtomicIntPointer();
1445  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
1446  Load->setAtomic(AO);
1447
1448  // Other decoration.
1449  if (IsVolatile)
1450    Load->setVolatile(true);
1451  CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
1452  return Load;
1453}
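// Illustrative only: for a lock-free 32-bit atomic the load above is emitted
// roughly as (assumed IR shape):
//   %val = load atomic i32, i32* %obj seq_cst, align 4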
1454
1455/// An LValue is a candidate for having its loads and stores be made atomic if
1456/// we are operating under /volatile:ms *and* the LValue itself is volatile and
1457/// performing such an operation can be performed without a libcall.
1458bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
1459  if (!CGM.getCodeGenOpts().MSVolatile) return false;
1460  AtomicInfo AI(*this, LV);
1461  bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
1462  // An atomic is inline if we don't need to use a libcall.
1463  bool AtomicIsInline = !AI.shouldUseLibcall();
1464  // MSVC doesn't seem to do this for types wider than a pointer.
1465  if (getContext().getTypeSize(LV.getType()) >
1466      getContext().getTypeSize(getContext().getIntPtrType()))
1467    return false;
1468  return IsVolatile && AtomicIsInline;
1469}
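// Illustrative only (assumed example): under /volatile:ms (-fms-volatile), a
// plain "volatile long Flag;" satisfies this predicate when the type is
// lock-free and no wider than a pointer, so its loads and stores are emitted
// with acquire/release atomic semantics by the functions below.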
1470
1471RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
1472                                       AggValueSlot Slot) {
1473  llvm::AtomicOrdering AO;
1474  bool IsVolatile = LV.isVolatileQualified();
1475  if (LV.getType()->isAtomicType()) {
1476    AO = llvm::AtomicOrdering::SequentiallyConsistent;
1477  } else {
1478    AO = llvm::AtomicOrdering::Acquire;
1479    IsVolatile = true;
1480  }
1481  return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
1482}
1483
1484RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
1485                                  bool AsValue, llvm::AtomicOrdering AO,
1486                                  bool IsVolatile) {
1487  // Check whether we should use a library call.
1488  if (shouldUseLibcall()) {
1489    Address TempAddr = Address::invalid();
1490    if (LVal.isSimple() && !ResultSlot.isIgnored()) {
1491      assert(getEvaluationKind() == TEK_Aggregate);
1492      TempAddr = ResultSlot.getAddress();
1493    } else
1494      TempAddr = CreateTempAlloca();
1495
1496    EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);
1497
1498    // Okay, turn that back into the original value or whole atomic (for
1499    // non-simple lvalues) type.
1500    return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
1501  }
1502
1503  // Okay, we're doing this natively.
1504  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);
1505
1506  // If we're ignoring an aggregate return, don't do anything.
1507  if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
1508    return RValue::getAggregate(Address::invalid(), false);
1509
1510  // Okay, turn that back into the original value or atomic (for non-simple
1511  // lvalues) type.
1512  return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
1513}
1514
1515/// Emit a load from an l-value of atomic type.  Note that the r-value
1516/// we produce is an r-value of the atomic *value* type.
1517RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
1518                                       llvm::AtomicOrdering AO, bool IsVolatile,
1519                                       AggValueSlot resultSlot) {
1520  AtomicInfo Atomics(*this, src);
1521  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
1522                                IsVolatile);
1523}
1524
1525/// Copy an r-value into memory as part of storing to an atomic type.
1526/// This needs to create a bit-pattern suitable for atomic operations.
1527void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
1528  assert(LVal.isSimple());
1529  // If we have an r-value, the rvalue should be of the atomic type,
1530  // which means that the caller is responsible for having zeroed
1531  // any padding.  Just do an aggregate copy of that type.
1532  if (rvalue.isAggregate()) {
1533    LValue Dest = CGF.MakeAddrLValue(getAtomicAddress(), getAtomicType());
1534    LValue Src = CGF.MakeAddrLValue(rvalue.getAggregateAddress(),
1535                                    getAtomicType());
1536    bool IsVolatile = rvalue.isVolatileQualified() ||
1537                      LVal.isVolatileQualified();
1538    CGF.EmitAggregateCopy(Dest, Src, getAtomicType(),
1539                          AggValueSlot::DoesNotOverlap, IsVolatile);
1540    return;
1541  }
1542
1543  // Okay, otherwise we're copying stuff.
1544
1545  // Zero out the buffer if necessary.
1546  emitMemSetZeroIfNecessary();
1547
1548  // Drill past the padding if present.
1549  LValue TempLVal = projectValue();
1550
1551  // Okay, store the rvalue in.
1552  if (rvalue.isScalar()) {
1553    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
1554  } else {
1555    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
1556  }
1557}
1558
1559
1560/// Materialize an r-value into memory for the purposes of storing it
1561/// to an atomic type.
1562Address AtomicInfo::materializeRValue(RValue rvalue) const {
1563  // Aggregate r-values are already in memory, and EmitAtomicStore
1564  // requires them to be values of the atomic type.
1565  if (rvalue.isAggregate())
1566    return rvalue.getAggregateAddress();
1567
1568  // Otherwise, make a temporary and materialize into it.
1569  LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
1570  AtomicInfo Atomics(CGF, TempLV);
1571  Atomics.emitCopyIntoMemory(rvalue);
1572  return TempLV.getAddress();
1573}
1574
1575llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
1576  // If we've got a scalar value of the right size, try to avoid going
1577  // through memory.
1578  if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
1579    llvm::Value *Value = RVal.getScalarVal();
1580    if (isa<llvm::IntegerType>(Value->getType()))
1581      return CGF.EmitToMemory(Value, ValueTy);
1582    else {
1583      llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
1584          CGF.getLLVMContext(),
1585          LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
1586      if (isa<llvm::PointerType>(Value->getType()))
1587        return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
1588      else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
1589        return CGF.Builder.CreateBitCast(Value, InputIntTy);
1590    }
1591  }
1592  // Otherwise, we need to go through memory.
1593  // Put the r-value in memory.
1594  Address Addr = materializeRValue(RVal);
1595
1596  // Cast the temporary to the atomic int type and pull a value out.
1597  Addr = emitCastToAtomicIntPointer(Addr);
1598  return CGF.Builder.CreateLoad(Addr);
1599}
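// Illustrative only: a padding-free scalar double is handled above by a
// bitcast to i64 and a pointer by ptrtoint; anything else (aggregates, padded
// or non-simple lvalues) takes the materialize-and-reload path at the end.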
1600
1601std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
1602    llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
1603    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
1604  // Do the atomic store.
1605  Address Addr = getAtomicAddressAsAtomicIntPointer();
1606  auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
1607                                               ExpectedVal, DesiredVal,
1608                                               Success, Failure);
1609  // Other decoration.
1610  Inst->setVolatile(LVal.isVolatileQualified());
1611  Inst->setWeak(IsWeak);
1612
1613  // Okay, turn that back into the original value type.
1614  auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
1615  auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
1616  return std::make_pair(PreviousVal, SuccessFailureVal);
1617}
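// Illustrative only: the sequence built above looks roughly like (assumed IR):
//   %pair = cmpxchg i32* %obj, i32 %expected, i32 %desired seq_cst seq_cst
//   %old  = extractvalue { i32, i1 } %pair, 0
//   %ok   = extractvalue { i32, i1 } %pair, 1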
1618
1619llvm::Value *
1620AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
1621                                             llvm::Value *DesiredAddr,
1622                                             llvm::AtomicOrdering Success,
1623                                             llvm::AtomicOrdering Failure) {
1624  // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
1625  // void *desired, int success, int failure);
1626  CallArgList Args;
1627  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1628  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
1629           CGF.getContext().VoidPtrTy);
1630  Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
1631           CGF.getContext().VoidPtrTy);
1632  Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
1633           CGF.getContext().VoidPtrTy);
1634  Args.add(RValue::get(
1635               llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
1636           CGF.getContext().IntTy);
1637  Args.add(RValue::get(
1638               llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
1639           CGF.getContext().IntTy);
1640  auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
1641                                              CGF.getContext().BoolTy, Args);
1642
1643  return SuccessFailureRVal.getScalarVal();
1644}
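// Illustrative only: for an 8-byte object with seq_cst success and acquire
// failure orderings, the call above is roughly (assumed IR shape):
//   %ok = call zeroext i1 @__atomic_compare_exchange(i64 8, i8* %obj,
//             i8* %expected, i8* %desired, i32 5, i32 2)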
1645
1646std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
1647    RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
1648    llvm::AtomicOrdering Failure, bool IsWeak) {
1649  if (isStrongerThan(Failure, Success))
1650    // Don't assert on undefined behavior "failure argument shall be no stronger
1651    // than the success argument".
1652    Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
1653
1654  // Check whether we should use a library call.
1655  if (shouldUseLibcall()) {
1656    // Produce a source address.
1657    Address ExpectedAddr = materializeRValue(Expected);
1658    Address DesiredAddr = materializeRValue(Desired);
1659    auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1660                                                 DesiredAddr.getPointer(),
1661                                                 Success, Failure);
1662    return std::make_pair(
1663        convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
1664                                  SourceLocation(), /*AsValue=*/false),
1665        Res);
1666  }
1667
1668  // If we've got a scalar value of the right size, try to avoid going
1669  // through memory.
1670  auto *ExpectedVal = convertRValueToInt(Expected);
1671  auto *DesiredVal = convertRValueToInt(Desired);
1672  auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
1673                                         Failure, IsWeak);
1674  return std::make_pair(
1675      ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
1676                                SourceLocation(), /*AsValue=*/false),
1677      Res.second);
1678}
1679
1680static void
1681EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
1682                      const llvm::function_ref<RValue(RValue)> &UpdateOp,
1683                      Address DesiredAddr) {
1684  RValue UpRVal;
1685  LValue AtomicLVal = Atomics.getAtomicLValue();
1686  LValue DesiredLVal;
1687  if (AtomicLVal.isSimple()) {
1688    UpRVal = OldRVal;
1689    DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
1690  } else {
1691    // Build new lvalue for temp address
1692    Address Ptr = Atomics.materializeRValue(OldRVal);
1693    LValue UpdateLVal;
1694    if (AtomicLVal.isBitField()) {
1695      UpdateLVal =
1696          LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
1697                               AtomicLVal.getType(),
1698                               AtomicLVal.getBaseInfo(),
1699                               AtomicLVal.getTBAAInfo());
1700      DesiredLVal =
1701          LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1702                               AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1703                               AtomicLVal.getTBAAInfo());
1704    } else if (AtomicLVal.isVectorElt()) {
1705      UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
1706                                         AtomicLVal.getType(),
1707                                         AtomicLVal.getBaseInfo(),
1708                                         AtomicLVal.getTBAAInfo());
1709      DesiredLVal = LValue::MakeVectorElt(
1710          DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
1711          AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1712    } else {
1713      assert(AtomicLVal.isExtVectorElt());
1714      UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
1715                                            AtomicLVal.getType(),
1716                                            AtomicLVal.getBaseInfo(),
1717                                            AtomicLVal.getTBAAInfo());
1718      DesiredLVal = LValue::MakeExtVectorElt(
1719          DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1720          AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1721    }
1722    UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
1723  }
1724  // Store new value in the corresponding memory area
1725  RValue NewRVal = UpdateOp(UpRVal);
1726  if (NewRVal.isScalar()) {
1727    CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
1728  } else {
1729    assert(NewRVal.isComplex());
1730    CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
1731                           /*isInit=*/false);
1732  }
1733}
1734
1735void AtomicInfo::EmitAtomicUpdateLibcall(
1736    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1737    bool IsVolatile) {
1738  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1739
1740  Address ExpectedAddr = CreateTempAlloca();
1741
1742  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
1743  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1744  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1745  CGF.EmitBlock(ContBB);
1746  Address DesiredAddr = CreateTempAlloca();
1747  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1748      requiresMemSetZero(getAtomicAddress().getElementType())) {
1749    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1750    CGF.Builder.CreateStore(OldVal, DesiredAddr);
1751  }
1752  auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
1753                                           AggValueSlot::ignored(),
1754                                           SourceLocation(), /*AsValue=*/false);
1755  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
1756  auto *Res =
1757      EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1758                                       DesiredAddr.getPointer(),
1759                                       AO, Failure);
1760  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1761  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1762}
1763
1764void AtomicInfo::EmitAtomicUpdateOp(
1765    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1766    bool IsVolatile) {
1767  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1768
1769  // Do the atomic load.
1770  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
1771  // For non-simple lvalues perform compare-and-swap procedure.
1772  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1773  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1774  auto *CurBB = CGF.Builder.GetInsertBlock();
1775  CGF.EmitBlock(ContBB);
1776  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1777                                             /*NumReservedValues=*/2);
1778  PHI->addIncoming(OldVal, CurBB);
1779  Address NewAtomicAddr = CreateTempAlloca();
1780  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1781  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1782      requiresMemSetZero(getAtomicAddress().getElementType())) {
1783    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1784  }
1785  auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
1786                                           SourceLocation(), /*AsValue=*/false);
1787  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
1788  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1789  // Try to write new value using cmpxchg operation
1790  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1791  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1792  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1793  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1794}
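// Illustrative only: the compare-and-swap loop built above has roughly this
// shape (assumed IR, iN = the padded atomic integer type):
//   atomic_cont:
//     %old  = phi iN [ %initial, %entry ], [ %prev, %atomic_cont ]
//     ; apply UpdateOp to %old and store the result into a temporary
//     %pair = cmpxchg iN* %obj, iN %old, iN %desired <AO> <failure>
//     %prev = extractvalue { iN, i1 } %pair, 0
//     %ok   = extractvalue { iN, i1 } %pair, 1
//     br i1 %ok, label %atomic_exit, label %atomic_cont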
1795
1796static void EmitAtomicUpdateValue(CodeGenFunction &CGFAtomicInfo &Atomics,
1797                                  RValue UpdateRVal, Address DesiredAddr) {
1798  LValue AtomicLVal = Atomics.getAtomicLValue();
1799  LValue DesiredLVal;
1800  // Build new lvalue for temp address
1801  if (AtomicLVal.isBitField()) {
1802    DesiredLVal =
1803        LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1804                             AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1805                             AtomicLVal.getTBAAInfo());
1806  } else if (AtomicLVal.isVectorElt()) {
1807    DesiredLVal =
1808        LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
1809                              AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1810                              AtomicLVal.getTBAAInfo());
1811  } else {
1812    assert(AtomicLVal.isExtVectorElt());
1813    DesiredLVal = LValue::MakeExtVectorElt(
1814        DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1815        AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1816  }
1817  // Store new value in the corresponding memory area
1818  assert(UpdateRVal.isScalar());
1819  CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
1820}
1821
1822void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
1823                                         RValue UpdateRVal, bool IsVolatile) {
1824  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1825
1826  Address ExpectedAddr = CreateTempAlloca();
1827
1828  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
1829  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1830  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1831  CGF.EmitBlock(ContBB);
1832  Address DesiredAddr = CreateTempAlloca();
1833  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1834      requiresMemSetZero(getAtomicAddress().getElementType())) {
1835    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1836    CGF.Builder.CreateStore(OldVal, DesiredAddr);
1837  }
1838  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
1839  auto *Res =
1840      EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1841                                       DesiredAddr.getPointer(),
1842                                       AO, Failure);
1843  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1844  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1845}
1846
1847void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
1848                                    bool IsVolatile) {
1849  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1850
1851  // Do the atomic load.
1852  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
1853  // For non-simple lvalues perform compare-and-swap procedure.
1854  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1855  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1856  auto *CurBB = CGF.Builder.GetInsertBlock();
1857  CGF.EmitBlock(ContBB);
1858  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1859                                             /*NumReservedValues=*/2);
1860  PHI->addIncoming(OldVal, CurBB);
1861  Address NewAtomicAddr = CreateTempAlloca();
1862  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1863  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1864      requiresMemSetZero(getAtomicAddress().getElementType())) {
1865    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1866  }
1867  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
1868  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1869  // Try to write new value using cmpxchg operation
1870  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1871  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1872  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1873  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1874}
1875
1876void AtomicInfo::EmitAtomicUpdate(
1877    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1878    bool IsVolatile) {
1879  if (shouldUseLibcall()) {
1880    EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
1881  } else {
1882    EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
1883  }
1884}
1885
1886void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
1887                                  bool IsVolatile) {
1888  if (shouldUseLibcall()) {
1889    EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
1890  } else {
1891    EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
1892  }
1893}
1894
1895void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
1896                                      bool isInit) {
1897  bool IsVolatile = lvalue.isVolatileQualified();
1898  llvm::AtomicOrdering AO;
1899  if (lvalue.getType()->isAtomicType()) {
1900    AO = llvm::AtomicOrdering::SequentiallyConsistent;
1901  } else {
1902    AO = llvm::AtomicOrdering::Release;
1903    IsVolatile = true;
1904  }
1905  return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
1906}
1907
1908/// Emit a store to an l-value of atomic type.
1909///
1910/// Note that the r-value is expected to be an r-value *of the atomic
1911/// type*; this means that for aggregate r-values, it should include
1912/// storage for any padding that was necessary.
1913void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
1914                                      llvm::AtomicOrdering AO, bool IsVolatile,
1915                                      bool isInit) {
1916  // If this is an aggregate r-value, it should agree in type except
1917  // maybe for address-space qualification.
1918  assert(!rvalue.isAggregate() ||
1919         rvalue.getAggregateAddress().getElementType()
1920           == dest.getAddress().getElementType());
1921
1922  AtomicInfo atomics(*this, dest);
1923  LValue LVal = atomics.getAtomicLValue();
1924
1925  // If this is an initialization, just put the value there normally.
1926  if (LVal.isSimple()) {
1927    if (isInit) {
1928      atomics.emitCopyIntoMemory(rvalue);
1929      return;
1930    }
1931
1932    // Check whether we should use a library call.
1933    if (atomics.shouldUseLibcall()) {
1934      // Produce a source address.
1935      Address srcAddr = atomics.materializeRValue(rvalue);
1936
1937      // void __atomic_store(size_t size, void *mem, void *val, int order)
1938      CallArgList args;
1939      args.add(RValue::get(atomics.getAtomicSizeValue()),
1940               getContext().getSizeType());
1941      args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())),
1942               getContext().VoidPtrTy);
1943      args.add(RValue::get(EmitCastToVoidPtr(srcAddr.getPointer())),
1944               getContext().VoidPtrTy);
1945      args.add(
1946          RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
1947          getContext().IntTy);
1948      emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
1949      return;
1950    }
1951
1952    // Okay, we're doing this natively.
1953    llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
1954
1955    // Do the atomic store.
1956    Address addr =
1957        atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
1958    intValue = Builder.CreateIntCast(
1959        intValue, addr.getElementType(), /*isSigned=*/false);
1960    llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
1961
1962    // Initializations don't need to be atomic.
1963    if (!isInit)
1964      store->setAtomic(AO);
1965
1966    // Other decoration.
1967    if (IsVolatile)
1968      store->setVolatile(true);
1969    CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
1970    return;
1971  }
1972
1973  // Emit simple atomic update operation.
1974  atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
1975}
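// Illustrative only: for a simple lock-free _Atomic(int) destination the path
// above reduces to (assumed IR shape):
//   store atomic i32 %val, i32* %obj seq_cst, align 4
// Non-simple lvalues (bit-fields, vector elements) instead go through the
// compare-and-swap update path at the end.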
1976
1977/// Emit a compare-and-exchange op for atomic type.
1978///
1979std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
1980    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
1981    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
1982    AggValueSlot Slot) {
1983  // If this is an aggregate r-value, it should agree in type except
1984  // maybe for address-space qualification.
1985  assert(!Expected.isAggregate() ||
1986         Expected.getAggregateAddress().getElementType() ==
1987             Obj.getAddress().getElementType());
1988  assert(!Desired.isAggregate() ||
1989         Desired.getAggregateAddress().getElementType() ==
1990             Obj.getAddress().getElementType());
1991  AtomicInfo Atomics(*this, Obj);
1992
1993  return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
1994                                           IsWeak);
1995}
1996
1997void CodeGenFunction::EmitAtomicUpdate(
1998    LValue LVal, llvm::AtomicOrdering AO,
1999    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
2000  AtomicInfo Atomics(*this, LVal);
2001  Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
2002}
2003
2004void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
2005  AtomicInfo atomics(*this, dest);
2006
2007  switch (atomics.getEvaluationKind()) {
2008  case TEK_Scalar: {
2009    llvm::Value *value = EmitScalarExpr(init);
2010    atomics.emitCopyIntoMemory(RValue::get(value));
2011    return;
2012  }
2013
2014  case TEK_Complex: {
2015    ComplexPairTy value = EmitComplexExpr(init);
2016    atomics.emitCopyIntoMemory(RValue::getComplex(value));
2017    return;
2018  }
2019
2020  case TEK_Aggregate: {
2021    // Fix up the destination if the initializer isn't an expression
2022    // of atomic type.
2023    bool Zeroed = false;
2024    if (!init->getType()->isAtomicType()) {
2025      Zeroed = atomics.emitMemSetZeroIfNecessary();
2026      dest = atomics.projectValue();
2027    }
2028
2029    // Evaluate the expression directly into the destination.
2030    AggValueSlot slot = AggValueSlot::forLValue(dest,
2031                                        AggValueSlot::IsNotDestructed,
2032                                        AggValueSlot::DoesNotNeedGCBarriers,
2033                                        AggValueSlot::IsNotAliased,
2034                                        AggValueSlot::DoesNotOverlap,
2035                                        Zeroed ? AggValueSlot::IsZeroed :
2036                                                 AggValueSlot::IsNotZeroed);
2037
2038    EmitAggExpr(init, slot);
2039    return;
2040  }
2041  }
2042  llvm_unreachable("bad evaluation kind");
2043}
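// Illustrative only: an initialization such as "_Atomic(int) g = 42;" reaches
// the TEK_Scalar case above; initialization needs no ordering guarantees, so
// the value is copied with a plain (non-atomic) store.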
2044