1 //===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains the code for emitting atomic operations.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CGCall.h"
15 #include "CGRecordLayout.h"
16 #include "CodeGenFunction.h"
17 #include "CodeGenModule.h"
18 #include "TargetInfo.h"
19 #include "clang/AST/ASTContext.h"
20 #include "clang/CodeGen/CGFunctionInfo.h"
21 #include "llvm/ADT/DenseMap.h"
22 #include "llvm/IR/DataLayout.h"
23 #include "llvm/IR/Intrinsics.h"
24 #include "llvm/IR/Operator.h"
25 
26 using namespace clang;
27 using namespace CodeGen;
28 
29 namespace {
30   class AtomicInfo {
31     CodeGenFunction &CGF;
32     QualType AtomicTy;
33     QualType ValueTy;
34     uint64_t AtomicSizeInBits;
35     uint64_t ValueSizeInBits;
36     CharUnits AtomicAlign;
37     CharUnits ValueAlign;
38     CharUnits LValueAlign;
39     TypeEvaluationKind EvaluationKind;
40     bool UseLibcall;
41     LValue LVal;
42     CGBitFieldInfo BFI;
43   public:
44     AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
45         : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
46           EvaluationKind(TEK_Scalar), UseLibcall(true) {
47       assert(!lvalue.isGlobalReg());
48       ASTContext &C = CGF.getContext();
49       if (lvalue.isSimple()) {
50         AtomicTy = lvalue.getType();
51         if (auto *ATy = AtomicTy->getAs<AtomicType>())
52           ValueTy = ATy->getValueType();
53         else
54           ValueTy = AtomicTy;
55         EvaluationKind = CGF.getEvaluationKind(ValueTy);
56 
57         uint64_t ValueAlignInBits;
58         uint64_t AtomicAlignInBits;
59         TypeInfo ValueTI = C.getTypeInfo(ValueTy);
60         ValueSizeInBits = ValueTI.Width;
61         ValueAlignInBits = ValueTI.Align;
62 
63         TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
64         AtomicSizeInBits = AtomicTI.Width;
65         AtomicAlignInBits = AtomicTI.Align;
66 
67         assert(ValueSizeInBits <= AtomicSizeInBits);
68         assert(ValueAlignInBits <= AtomicAlignInBits);
69 
70         AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
71         ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
72         if (lvalue.getAlignment().isZero())
73           lvalue.setAlignment(AtomicAlign);
74 
75         LVal = lvalue;
76       } else if (lvalue.isBitField()) {
77         ValueTy = lvalue.getType();
78         ValueSizeInBits = C.getTypeSize(ValueTy);
79         auto &OrigBFI = lvalue.getBitFieldInfo();
80         auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
81         AtomicSizeInBits = C.toBits(
82             C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
83                 .alignTo(lvalue.getAlignment()));
84         auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldPointer());
85         auto OffsetInChars =
86             (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
87             lvalue.getAlignment();
88         VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
89             VoidPtrAddr, OffsetInChars.getQuantity());
90         auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
91             VoidPtrAddr,
92             CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
93             "atomic_bitfield_base");
94         BFI = OrigBFI;
95         BFI.Offset = Offset;
96         BFI.StorageSize = AtomicSizeInBits;
97         BFI.StorageOffset += OffsetInChars;
98         LVal = LValue::MakeBitfield(Address(Addr, lvalue.getAlignment()),
99                                     BFI, lvalue.getType(),
100                                     lvalue.getBaseInfo());
101         LVal.setTBAAInfo(lvalue.getTBAAInfo());
102         AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
103         if (AtomicTy.isNull()) {
104           llvm::APInt Size(
105               /*numBits=*/32,
106               C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
107           AtomicTy = C.getConstantArrayType(C.CharTy, Size, ArrayType::Normal,
108                                             /*IndexTypeQuals=*/0);
109         }
110         AtomicAlign = ValueAlign = lvalue.getAlignment();
111       } else if (lvalue.isVectorElt()) {
112         ValueTy = lvalue.getType()->getAs<VectorType>()->getElementType();
113         ValueSizeInBits = C.getTypeSize(ValueTy);
114         AtomicTy = lvalue.getType();
115         AtomicSizeInBits = C.getTypeSize(AtomicTy);
116         AtomicAlign = ValueAlign = lvalue.getAlignment();
117         LVal = lvalue;
118       } else {
119         assert(lvalue.isExtVectorElt());
120         ValueTy = lvalue.getType();
121         ValueSizeInBits = C.getTypeSize(ValueTy);
122         AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
123             lvalue.getType(), lvalue.getExtVectorAddress()
124                                   .getElementType()->getVectorNumElements());
125         AtomicSizeInBits = C.getTypeSize(AtomicTy);
126         AtomicAlign = ValueAlign = lvalue.getAlignment();
127         LVal = lvalue;
128       }
129       UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
130           AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
131     }
132 
133     QualType getAtomicType() const { return AtomicTy; }
134     QualType getValueType() const { return ValueTy; }
135     CharUnits getAtomicAlignment() const { return AtomicAlign; }
136     CharUnits getValueAlignment() const { return ValueAlign; }
137     uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
138     uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
139     TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
140     bool shouldUseLibcall() const { return UseLibcall; }
141     const LValue &getAtomicLValue() const { return LVal; }
142     llvm::Value *getAtomicPointer() const {
143       if (LVal.isSimple())
144         return LVal.getPointer();
145       else if (LVal.isBitField())
146         return LVal.getBitFieldPointer();
147       else if (LVal.isVectorElt())
148         return LVal.getVectorPointer();
149       assert(LVal.isExtVectorElt());
150       return LVal.getExtVectorPointer();
151     }
152     Address getAtomicAddress() const {
153       return Address(getAtomicPointer(), getAtomicAlignment());
154     }
155 
156     Address getAtomicAddressAsAtomicIntPointer() const {
157       return emitCastToAtomicIntPointer(getAtomicAddress());
158     }
159 
160     /// Is the atomic size larger than the underlying value type?
161     ///
162     /// Note that the absence of padding does not mean that atomic
163     /// objects are completely interchangeable with non-atomic
164     /// objects: we might have promoted the alignment of a type
165     /// without making it bigger.
166     bool hasPadding() const {
167       return (ValueSizeInBits != AtomicSizeInBits);
168     }
169 
170     bool emitMemSetZeroIfNecessary() const;
171 
172     llvm::Value *getAtomicSizeValue() const {
173       CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
174       return CGF.CGM.getSize(size);
175     }
176 
177     /// Cast the given pointer to an integer pointer suitable for atomic
178     /// operations if the source.
179     Address emitCastToAtomicIntPointer(Address Addr) const;
180 
181     /// If Addr is compatible with the iN that will be used for an atomic
182     /// operation, bitcast it. Otherwise, create a temporary that is suitable
183     /// and copy the value across.
184     Address convertToAtomicIntPointer(Address Addr) const;
185 
186     /// Turn an atomic-layout object into an r-value.
187     RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
188                                      SourceLocation loc, bool AsValue) const;
189 
190     /// \brief Converts a rvalue to integer value.
191     llvm::Value *convertRValueToInt(RValue RVal) const;
192 
193     RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
194                                      AggValueSlot ResultSlot,
195                                      SourceLocation Loc, bool AsValue) const;
196 
197     /// Copy an atomic r-value into atomic-layout memory.
198     void emitCopyIntoMemory(RValue rvalue) const;
199 
200     /// Project an l-value down to the value field.
201     LValue projectValue() const {
202       assert(LVal.isSimple());
203       Address addr = getAtomicAddress();
204       if (hasPadding())
205         addr = CGF.Builder.CreateStructGEP(addr, 0, CharUnits());
206 
207       return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
208                               LVal.getBaseInfo(), LVal.getTBAAInfo());
209     }
210 
211     /// \brief Emits atomic load.
212     /// \returns Loaded value.
213     RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
214                           bool AsValue, llvm::AtomicOrdering AO,
215                           bool IsVolatile);
216 
217     /// \brief Emits atomic compare-and-exchange sequence.
218     /// \param Expected Expected value.
219     /// \param Desired Desired value.
220     /// \param Success Atomic ordering for success operation.
221     /// \param Failure Atomic ordering for failed operation.
222     /// \param IsWeak true if atomic operation is weak, false otherwise.
223     /// \returns Pair of values: previous value from storage (value type) and
224     /// boolean flag (i1 type) with true if success and false otherwise.
225     std::pair<RValue, llvm::Value *>
226     EmitAtomicCompareExchange(RValue Expected, RValue Desired,
227                               llvm::AtomicOrdering Success =
228                                   llvm::AtomicOrdering::SequentiallyConsistent,
229                               llvm::AtomicOrdering Failure =
230                                   llvm::AtomicOrdering::SequentiallyConsistent,
231                               bool IsWeak = false);
232 
233     /// \brief Emits atomic update.
234     /// \param AO Atomic ordering.
235     /// \param UpdateOp Update operation for the current lvalue.
236     void EmitAtomicUpdate(llvm::AtomicOrdering AO,
237                           const llvm::function_ref<RValue(RValue)> &UpdateOp,
238                           bool IsVolatile);
239     /// \brief Emits atomic update.
240     /// \param AO Atomic ordering.
241     void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
242                           bool IsVolatile);
243 
244     /// Materialize an atomic r-value in atomic-layout memory.
245     Address materializeRValue(RValue rvalue) const;
246 
247     /// \brief Creates temp alloca for intermediate operations on atomic value.
248     Address CreateTempAlloca() const;
249   private:
250     bool requiresMemSetZero(llvm::Type *type) const;
251 
252 
253     /// \brief Emits atomic load as a libcall.
254     void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
255                                llvm::AtomicOrdering AO, bool IsVolatile);
256     /// \brief Emits atomic load as LLVM instruction.
257     llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
258     /// \brief Emits atomic compare-and-exchange op as a libcall.
259     llvm::Value *EmitAtomicCompareExchangeLibcall(
260         llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
261         llvm::AtomicOrdering Success =
262             llvm::AtomicOrdering::SequentiallyConsistent,
263         llvm::AtomicOrdering Failure =
264             llvm::AtomicOrdering::SequentiallyConsistent);
265     /// \brief Emits atomic compare-and-exchange op as LLVM instruction.
266     std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
267         llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
268         llvm::AtomicOrdering Success =
269             llvm::AtomicOrdering::SequentiallyConsistent,
270         llvm::AtomicOrdering Failure =
271             llvm::AtomicOrdering::SequentiallyConsistent,
272         bool IsWeak = false);
273     /// \brief Emit atomic update as libcalls.
274     void
275     EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
276                             const llvm::function_ref<RValue(RValue)> &UpdateOp,
277                             bool IsVolatile);
278     /// \brief Emit atomic update as LLVM instructions.
279     void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
280                             const llvm::function_ref<RValue(RValue)> &UpdateOp,
281                             bool IsVolatile);
282     /// \brief Emit atomic update as libcalls.
283     void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
284                                  bool IsVolatile);
285     /// \brief Emit atomic update as LLVM instructions.
286     void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRal,
287                             bool IsVolatile);
288   };
289 }
290 
291 Address AtomicInfo::CreateTempAlloca() const {
292   Address TempAlloca = CGF.CreateMemTemp(
293       (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
294                                                                 : AtomicTy,
295       getAtomicAlignment(),
296       "atomic-temp");
297   // Cast to pointer to value type for bitfields.
298   if (LVal.isBitField())
299     return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
300         TempAlloca, getAtomicAddress().getType());
301   return TempAlloca;
302 }
303 
304 static RValue emitAtomicLibcall(CodeGenFunction &CGF,
305                                 StringRef fnName,
306                                 QualType resultType,
307                                 CallArgList &args) {
308   const CGFunctionInfo &fnInfo =
309     CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
310   llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
311   llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
312   auto callee = CGCallee::forDirect(fn);
313   return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
314 }
315 
316 /// Does a store of the given IR type modify the full expected width?
317 static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
318                            uint64_t expectedSize) {
319   return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
320 }
321 
322 /// Does the atomic type require memsetting to zero before initialization?
323 ///
324 /// The IR type is provided as a way of making certain queries faster.
325 bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
326   // If the atomic type has size padding, we definitely need a memset.
327   if (hasPadding()) return true;
328 
329   // Otherwise, do some simple heuristics to try to avoid it:
330   switch (getEvaluationKind()) {
331   // For scalars and complexes, check whether the store size of the
332   // type uses the full size.
333   case TEK_Scalar:
334     return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
335   case TEK_Complex:
336     return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
337                            AtomicSizeInBits / 2);
338 
339   // Padding in structs has an undefined bit pattern.  User beware.
340   case TEK_Aggregate:
341     return false;
342   }
343   llvm_unreachable("bad evaluation kind");
344 }
345 
346 bool AtomicInfo::emitMemSetZeroIfNecessary() const {
347   assert(LVal.isSimple());
348   llvm::Value *addr = LVal.getPointer();
349   if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
350     return false;
351 
352   CGF.Builder.CreateMemSet(
353       addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
354       CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
355       LVal.getAlignment().getQuantity());
356   return true;
357 }
358 
359 static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
360                               Address Dest, Address Ptr,
361                               Address Val1, Address Val2,
362                               uint64_t Size,
363                               llvm::AtomicOrdering SuccessOrder,
364                               llvm::AtomicOrdering FailureOrder,
365                               llvm::SyncScope::ID Scope) {
366   // Note that cmpxchg doesn't support weak cmpxchg, at least at the moment.
367   llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
368   llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);
369 
370   llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
371       Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder,
372       Scope);
373   Pair->setVolatile(E->isVolatile());
374   Pair->setWeak(IsWeak);
375 
376   // Cmp holds the result of the compare-exchange operation: true on success,
377   // false on failure.
378   llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
379   llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);
380 
381   // This basic block is used to hold the store instruction if the operation
382   // failed.
383   llvm::BasicBlock *StoreExpectedBB =
384       CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
385 
386   // This basic block is the exit point of the operation, we should end up
387   // here regardless of whether or not the operation succeeded.
388   llvm::BasicBlock *ContinueBB =
389       CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
390 
391   // Update Expected if Expected isn't equal to Old, otherwise branch to the
392   // exit point.
393   CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);
394 
395   CGF.Builder.SetInsertPoint(StoreExpectedBB);
396   // Update the memory at Expected with Old's value.
397   CGF.Builder.CreateStore(Old, Val1);
398   // Finally, branch to the exit point.
399   CGF.Builder.CreateBr(ContinueBB);
400 
401   CGF.Builder.SetInsertPoint(ContinueBB);
402   // Update the memory at Dest with Cmp's value.
403   CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
404 }
405 
406 /// Given an ordering required on success, emit all possible cmpxchg
407 /// instructions to cope with the provided (but possibly only dynamically known)
408 /// FailureOrder.
409 static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
410                                         bool IsWeak, Address Dest, Address Ptr,
411                                         Address Val1, Address Val2,
412                                         llvm::Value *FailureOrderVal,
413                                         uint64_t Size,
414                                         llvm::AtomicOrdering SuccessOrder,
415                                         llvm::SyncScope::ID Scope) {
416   llvm::AtomicOrdering FailureOrder;
417   if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
418     auto FOS = FO->getSExtValue();
419     if (!llvm::isValidAtomicOrderingCABI(FOS))
420       FailureOrder = llvm::AtomicOrdering::Monotonic;
421     else
422       switch ((llvm::AtomicOrderingCABI)FOS) {
423       case llvm::AtomicOrderingCABI::relaxed:
424       case llvm::AtomicOrderingCABI::release:
425       case llvm::AtomicOrderingCABI::acq_rel:
426         FailureOrder = llvm::AtomicOrdering::Monotonic;
427         break;
428       case llvm::AtomicOrderingCABI::consume:
429       case llvm::AtomicOrderingCABI::acquire:
430         FailureOrder = llvm::AtomicOrdering::Acquire;
431         break;
432       case llvm::AtomicOrderingCABI::seq_cst:
433         FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
434         break;
435       }
436     if (isStrongerThan(FailureOrder, SuccessOrder)) {
437       // Don't assert on undefined behavior "failure argument shall be no
438       // stronger than the success argument".
439       FailureOrder =
440           llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
441     }
442     emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
443                       FailureOrder, Scope);
444     return;
445   }
446 
447   // Create all the relevant BB's
448   llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
449                    *SeqCstBB = nullptr;
450   MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
451   if (SuccessOrder != llvm::AtomicOrdering::Monotonic &&
452       SuccessOrder != llvm::AtomicOrdering::Release)
453     AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
454   if (SuccessOrder == llvm::AtomicOrdering::SequentiallyConsistent)
455     SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
456 
457   llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);
458 
459   llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
460 
461   // Emit all the different atomics
462 
463   // MonotonicBB is arbitrarily chosen as the default case; in practice, this
464   // doesn't matter unless someone is crazy enough to use something that
465   // doesn't fold to a constant for the ordering.
466   CGF.Builder.SetInsertPoint(MonotonicBB);
467   emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
468                     Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
469   CGF.Builder.CreateBr(ContBB);
470 
471   if (AcquireBB) {
472     CGF.Builder.SetInsertPoint(AcquireBB);
473     emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
474                       Size, SuccessOrder, llvm::AtomicOrdering::Acquire, Scope);
475     CGF.Builder.CreateBr(ContBB);
476     SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
477                 AcquireBB);
478     SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
479                 AcquireBB);
480   }
481   if (SeqCstBB) {
482     CGF.Builder.SetInsertPoint(SeqCstBB);
483     emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
484                       llvm::AtomicOrdering::SequentiallyConsistent, Scope);
485     CGF.Builder.CreateBr(ContBB);
486     SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
487                 SeqCstBB);
488   }
489 
490   CGF.Builder.SetInsertPoint(ContBB);
491 }
492 
493 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
494                          Address Ptr, Address Val1, Address Val2,
495                          llvm::Value *IsWeak, llvm::Value *FailureOrder,
496                          uint64_t Size, llvm::AtomicOrdering Order,
497                          llvm::SyncScope::ID Scope) {
498   llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
499   llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
500 
501   switch (E->getOp()) {
502   case AtomicExpr::AO__c11_atomic_init:
503   case AtomicExpr::AO__opencl_atomic_init:
504     llvm_unreachable("Already handled!");
505 
506   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
507   case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
508     emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
509                                 FailureOrder, Size, Order, Scope);
510     return;
511   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
512   case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
513     emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
514                                 FailureOrder, Size, Order, Scope);
515     return;
516   case AtomicExpr::AO__atomic_compare_exchange:
517   case AtomicExpr::AO__atomic_compare_exchange_n: {
518     if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
519       emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
520                                   Val1, Val2, FailureOrder, Size, Order, Scope);
521     } else {
522       // Create all the relevant BB's
523       llvm::BasicBlock *StrongBB =
524           CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
525       llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmxchg.weak", CGF.CurFn);
526       llvm::BasicBlock *ContBB =
527           CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
528 
529       llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
530       SI->addCase(CGF.Builder.getInt1(false), StrongBB);
531 
532       CGF.Builder.SetInsertPoint(StrongBB);
533       emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
534                                   FailureOrder, Size, Order, Scope);
535       CGF.Builder.CreateBr(ContBB);
536 
537       CGF.Builder.SetInsertPoint(WeakBB);
538       emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
539                                   FailureOrder, Size, Order, Scope);
540       CGF.Builder.CreateBr(ContBB);
541 
542       CGF.Builder.SetInsertPoint(ContBB);
543     }
544     return;
545   }
546   case AtomicExpr::AO__c11_atomic_load:
547   case AtomicExpr::AO__opencl_atomic_load:
548   case AtomicExpr::AO__atomic_load_n:
549   case AtomicExpr::AO__atomic_load: {
550     llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
551     Load->setAtomic(Order, Scope);
552     Load->setVolatile(E->isVolatile());
553     CGF.Builder.CreateStore(Load, Dest);
554     return;
555   }
556 
557   case AtomicExpr::AO__c11_atomic_store:
558   case AtomicExpr::AO__opencl_atomic_store:
559   case AtomicExpr::AO__atomic_store:
560   case AtomicExpr::AO__atomic_store_n: {
561     llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
562     llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
563     Store->setAtomic(Order, Scope);
564     Store->setVolatile(E->isVolatile());
565     return;
566   }
567 
568   case AtomicExpr::AO__c11_atomic_exchange:
569   case AtomicExpr::AO__opencl_atomic_exchange:
570   case AtomicExpr::AO__atomic_exchange_n:
571   case AtomicExpr::AO__atomic_exchange:
572     Op = llvm::AtomicRMWInst::Xchg;
573     break;
574 
575   case AtomicExpr::AO__atomic_add_fetch:
576     PostOp = llvm::Instruction::Add;
577     // Fall through.
578   case AtomicExpr::AO__c11_atomic_fetch_add:
579   case AtomicExpr::AO__opencl_atomic_fetch_add:
580   case AtomicExpr::AO__atomic_fetch_add:
581     Op = llvm::AtomicRMWInst::Add;
582     break;
583 
584   case AtomicExpr::AO__atomic_sub_fetch:
585     PostOp = llvm::Instruction::Sub;
586     // Fall through.
587   case AtomicExpr::AO__c11_atomic_fetch_sub:
588   case AtomicExpr::AO__opencl_atomic_fetch_sub:
589   case AtomicExpr::AO__atomic_fetch_sub:
590     Op = llvm::AtomicRMWInst::Sub;
591     break;
592 
593   case AtomicExpr::AO__opencl_atomic_fetch_min:
594     Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min
595                                                   : llvm::AtomicRMWInst::UMin;
596     break;
597 
598   case AtomicExpr::AO__opencl_atomic_fetch_max:
599     Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max
600                                                   : llvm::AtomicRMWInst::UMax;
601     break;
602 
603   case AtomicExpr::AO__atomic_and_fetch:
604     PostOp = llvm::Instruction::And;
605     // Fall through.
606   case AtomicExpr::AO__c11_atomic_fetch_and:
607   case AtomicExpr::AO__opencl_atomic_fetch_and:
608   case AtomicExpr::AO__atomic_fetch_and:
609     Op = llvm::AtomicRMWInst::And;
610     break;
611 
612   case AtomicExpr::AO__atomic_or_fetch:
613     PostOp = llvm::Instruction::Or;
614     // Fall through.
615   case AtomicExpr::AO__c11_atomic_fetch_or:
616   case AtomicExpr::AO__opencl_atomic_fetch_or:
617   case AtomicExpr::AO__atomic_fetch_or:
618     Op = llvm::AtomicRMWInst::Or;
619     break;
620 
621   case AtomicExpr::AO__atomic_xor_fetch:
622     PostOp = llvm::Instruction::Xor;
623     // Fall through.
624   case AtomicExpr::AO__c11_atomic_fetch_xor:
625   case AtomicExpr::AO__opencl_atomic_fetch_xor:
626   case AtomicExpr::AO__atomic_fetch_xor:
627     Op = llvm::AtomicRMWInst::Xor;
628     break;
629 
630   case AtomicExpr::AO__atomic_nand_fetch:
631     PostOp = llvm::Instruction::And; // the NOT is special cased below
632   // Fall through.
633   case AtomicExpr::AO__atomic_fetch_nand:
634     Op = llvm::AtomicRMWInst::Nand;
635     break;
636   }
637 
638   llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
639   llvm::AtomicRMWInst *RMWI =
640       CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order, Scope);
641   RMWI->setVolatile(E->isVolatile());
642 
643   // For __atomic_*_fetch operations, perform the operation again to
644   // determine the value which was written.
645   llvm::Value *Result = RMWI;
646   if (PostOp)
647     Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
648   if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
649     Result = CGF.Builder.CreateNot(Result);
650   CGF.Builder.CreateStore(Result, Dest);
651 }
652 
653 // This function emits any expression (scalar, complex, or aggregate)
654 // into a temporary alloca.
655 static Address
656 EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
657   Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
658   CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
659                        /*Init*/ true);
660   return DeclPtr;
661 }
662 
663 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
664                          Address Ptr, Address Val1, Address Val2,
665                          llvm::Value *IsWeak, llvm::Value *FailureOrder,
666                          uint64_t Size, llvm::AtomicOrdering Order,
667                          llvm::Value *Scope) {
668   auto ScopeModel = Expr->getScopeModel();
669 
670   // LLVM atomic instructions always have synch scope. If clang atomic
671   // expression has no scope operand, use default LLVM synch scope.
672   if (!ScopeModel) {
673     EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
674                  Order, CGF.CGM.getLLVMContext().getOrInsertSyncScopeID(""));
675     return;
676   }
677 
678   // Handle constant scope.
679   if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
680     auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
681         ScopeModel->map(SC->getZExtValue()), CGF.CGM.getLLVMContext());
682     EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
683                  Order, SCID);
684     return;
685   }
686 
687   // Handle non-constant scope.
688   auto &Builder = CGF.Builder;
689   auto Scopes = ScopeModel->getRuntimeValues();
690   llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
691   for (auto S : Scopes)
692     BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);
693 
694   llvm::BasicBlock *ContBB =
695       CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);
696 
697   auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
698   // If unsupported synch scope is encountered at run time, assume a fallback
699   // synch scope value.
700   auto FallBack = ScopeModel->getFallBackValue();
701   llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
702   for (auto S : Scopes) {
703     auto *B = BB[S];
704     if (S != FallBack)
705       SI->addCase(Builder.getInt32(S), B);
706 
707     Builder.SetInsertPoint(B);
708     EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
709                  Order,
710                  CGF.getTargetHooks().getLLVMSyncScopeID(ScopeModel->map(S),
711                                                          CGF.getLLVMContext()));
712     Builder.CreateBr(ContBB);
713   }
714 
715   Builder.SetInsertPoint(ContBB);
716 }
717 
718 static void
719 AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
720                   bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
721                   SourceLocation Loc, CharUnits SizeInChars) {
722   if (UseOptimizedLibcall) {
723     // Load value and pass it to the function directly.
724     CharUnits Align = CGF.getContext().getTypeAlignInChars(ValTy);
725     int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
726     ValTy =
727         CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
728     llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
729                                                 SizeInBits)->getPointerTo();
730     Address Ptr = Address(CGF.Builder.CreateBitCast(Val, IPtrTy), Align);
731     Val = CGF.EmitLoadOfScalar(Ptr, false,
732                                CGF.getContext().getPointerType(ValTy),
733                                Loc);
734     // Coerce the value into an appropriately sized integer type.
735     Args.add(RValue::get(Val), ValTy);
736   } else {
737     // Non-optimized functions always take a reference.
738     Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
739                          CGF.getContext().VoidPtrTy);
740   }
741 }
742 
743 RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
744   QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
745   QualType MemTy = AtomicTy;
746   if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
747     MemTy = AT->getValueType();
748   CharUnits sizeChars, alignChars;
749   std::tie(sizeChars, alignChars) = getContext().getTypeInfoInChars(AtomicTy);
750   uint64_t Size = sizeChars.getQuantity();
751   unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
752   bool UseLibcall = (sizeChars != alignChars ||
753                      getContext().toBits(sizeChars) > MaxInlineWidthInBits);
754 
755   llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;
756 
757   Address Val1 = Address::invalid();
758   Address Val2 = Address::invalid();
759   Address Dest = Address::invalid();
760   Address Ptr(EmitScalarExpr(E->getPtr()), alignChars);
761 
762   if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
763       E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
764     LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
765     EmitAtomicInit(E->getVal1(), lvalue);
766     return RValue::get(nullptr);
767   }
768 
769   llvm::Value *Order = EmitScalarExpr(E->getOrder());
770   llvm::Value *Scope =
771       E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;
772 
773   switch (E->getOp()) {
774   case AtomicExpr::AO__c11_atomic_init:
775   case AtomicExpr::AO__opencl_atomic_init:
776     llvm_unreachable("Already handled above with EmitAtomicInit!");
777 
778   case AtomicExpr::AO__c11_atomic_load:
779   case AtomicExpr::AO__opencl_atomic_load:
780   case AtomicExpr::AO__atomic_load_n:
781     break;
782 
783   case AtomicExpr::AO__atomic_load:
784     Dest = EmitPointerWithAlignment(E->getVal1());
785     break;
786 
787   case AtomicExpr::AO__atomic_store:
788     Val1 = EmitPointerWithAlignment(E->getVal1());
789     break;
790 
791   case AtomicExpr::AO__atomic_exchange:
792     Val1 = EmitPointerWithAlignment(E->getVal1());
793     Dest = EmitPointerWithAlignment(E->getVal2());
794     break;
795 
796   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
797   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
798   case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
799   case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
800   case AtomicExpr::AO__atomic_compare_exchange_n:
801   case AtomicExpr::AO__atomic_compare_exchange:
802     Val1 = EmitPointerWithAlignment(E->getVal1());
803     if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
804       Val2 = EmitPointerWithAlignment(E->getVal2());
805     else
806       Val2 = EmitValToTemp(*this, E->getVal2());
807     OrderFail = EmitScalarExpr(E->getOrderFail());
808     if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
809         E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
810       IsWeak = EmitScalarExpr(E->getWeak());
811     break;
812 
813   case AtomicExpr::AO__c11_atomic_fetch_add:
814   case AtomicExpr::AO__c11_atomic_fetch_sub:
815   case AtomicExpr::AO__opencl_atomic_fetch_add:
816   case AtomicExpr::AO__opencl_atomic_fetch_sub:
817     if (MemTy->isPointerType()) {
818       // For pointer arithmetic, we're required to do a bit of math:
819       // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
820       // ... but only for the C11 builtins. The GNU builtins expect the
821       // user to multiply by sizeof(T).
822       QualType Val1Ty = E->getVal1()->getType();
823       llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
824       CharUnits PointeeIncAmt =
825           getContext().getTypeSizeInChars(MemTy->getPointeeType());
826       Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
827       auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
828       Val1 = Temp;
829       EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
830       break;
831     }
832     // Fall through.
833   case AtomicExpr::AO__atomic_fetch_add:
834   case AtomicExpr::AO__atomic_fetch_sub:
835   case AtomicExpr::AO__atomic_add_fetch:
836   case AtomicExpr::AO__atomic_sub_fetch:
837   case AtomicExpr::AO__c11_atomic_store:
838   case AtomicExpr::AO__c11_atomic_exchange:
839   case AtomicExpr::AO__opencl_atomic_store:
840   case AtomicExpr::AO__opencl_atomic_exchange:
841   case AtomicExpr::AO__atomic_store_n:
842   case AtomicExpr::AO__atomic_exchange_n:
843   case AtomicExpr::AO__c11_atomic_fetch_and:
844   case AtomicExpr::AO__c11_atomic_fetch_or:
845   case AtomicExpr::AO__c11_atomic_fetch_xor:
846   case AtomicExpr::AO__opencl_atomic_fetch_and:
847   case AtomicExpr::AO__opencl_atomic_fetch_or:
848   case AtomicExpr::AO__opencl_atomic_fetch_xor:
849   case AtomicExpr::AO__opencl_atomic_fetch_min:
850   case AtomicExpr::AO__opencl_atomic_fetch_max:
851   case AtomicExpr::AO__atomic_fetch_and:
852   case AtomicExpr::AO__atomic_fetch_or:
853   case AtomicExpr::AO__atomic_fetch_xor:
854   case AtomicExpr::AO__atomic_fetch_nand:
855   case AtomicExpr::AO__atomic_and_fetch:
856   case AtomicExpr::AO__atomic_or_fetch:
857   case AtomicExpr::AO__atomic_xor_fetch:
858   case AtomicExpr::AO__atomic_nand_fetch:
859     Val1 = EmitValToTemp(*this, E->getVal1());
860     break;
861   }
862 
863   QualType RValTy = E->getType().getUnqualifiedType();
864 
865   // The inlined atomics only function on iN types, where N is a power of 2. We
866   // need to make sure (via temporaries if necessary) that all incoming values
867   // are compatible.
868   LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
869   AtomicInfo Atomics(*this, AtomicVal);
870 
871   Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
872   if (Val1.isValid()) Val1 = Atomics.convertToAtomicIntPointer(Val1);
873   if (Val2.isValid()) Val2 = Atomics.convertToAtomicIntPointer(Val2);
874   if (Dest.isValid())
875     Dest = Atomics.emitCastToAtomicIntPointer(Dest);
876   else if (E->isCmpXChg())
877     Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
878   else if (!RValTy->isVoidType())
879     Dest = Atomics.emitCastToAtomicIntPointer(Atomics.CreateTempAlloca());
880 
881   // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
882   if (UseLibcall) {
883     bool UseOptimizedLibcall = false;
884     switch (E->getOp()) {
885     case AtomicExpr::AO__c11_atomic_init:
886     case AtomicExpr::AO__opencl_atomic_init:
887       llvm_unreachable("Already handled above with EmitAtomicInit!");
888 
889     case AtomicExpr::AO__c11_atomic_fetch_add:
890     case AtomicExpr::AO__opencl_atomic_fetch_add:
891     case AtomicExpr::AO__atomic_fetch_add:
892     case AtomicExpr::AO__c11_atomic_fetch_and:
893     case AtomicExpr::AO__opencl_atomic_fetch_and:
894     case AtomicExpr::AO__atomic_fetch_and:
895     case AtomicExpr::AO__c11_atomic_fetch_or:
896     case AtomicExpr::AO__opencl_atomic_fetch_or:
897     case AtomicExpr::AO__atomic_fetch_or:
898     case AtomicExpr::AO__atomic_fetch_nand:
899     case AtomicExpr::AO__c11_atomic_fetch_sub:
900     case AtomicExpr::AO__opencl_atomic_fetch_sub:
901     case AtomicExpr::AO__atomic_fetch_sub:
902     case AtomicExpr::AO__c11_atomic_fetch_xor:
903     case AtomicExpr::AO__opencl_atomic_fetch_xor:
904     case AtomicExpr::AO__opencl_atomic_fetch_min:
905     case AtomicExpr::AO__opencl_atomic_fetch_max:
906     case AtomicExpr::AO__atomic_fetch_xor:
907     case AtomicExpr::AO__atomic_add_fetch:
908     case AtomicExpr::AO__atomic_and_fetch:
909     case AtomicExpr::AO__atomic_nand_fetch:
910     case AtomicExpr::AO__atomic_or_fetch:
911     case AtomicExpr::AO__atomic_sub_fetch:
912     case AtomicExpr::AO__atomic_xor_fetch:
913       // For these, only library calls for certain sizes exist.
914       UseOptimizedLibcall = true;
915       break;
916 
917     case AtomicExpr::AO__c11_atomic_load:
918     case AtomicExpr::AO__c11_atomic_store:
919     case AtomicExpr::AO__c11_atomic_exchange:
920     case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
921     case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
922     case AtomicExpr::AO__opencl_atomic_load:
923     case AtomicExpr::AO__opencl_atomic_store:
924     case AtomicExpr::AO__opencl_atomic_exchange:
925     case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
926     case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
927     case AtomicExpr::AO__atomic_load_n:
928     case AtomicExpr::AO__atomic_load:
929     case AtomicExpr::AO__atomic_store_n:
930     case AtomicExpr::AO__atomic_store:
931     case AtomicExpr::AO__atomic_exchange_n:
932     case AtomicExpr::AO__atomic_exchange:
933     case AtomicExpr::AO__atomic_compare_exchange_n:
934     case AtomicExpr::AO__atomic_compare_exchange:
935       // Only use optimized library calls for sizes for which they exist.
936       if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
937         UseOptimizedLibcall = true;
938       break;
939     }
940 
941     CallArgList Args;
942     if (!UseOptimizedLibcall) {
943       // For non-optimized library calls, the size is the first parameter
944       Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
945                getContext().getSizeType());
946     }
947     // Atomic address is the first or second parameter
948     // The OpenCL atomic library functions only accept pointer arguments to
949     // generic address space.
950     auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
951       if (!E->isOpenCL())
952         return V;
953       auto AS = PT->getAs<PointerType>()->getPointeeType().getAddressSpace();
954       if (AS == LangAS::opencl_generic)
955         return V;
956       auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
957       auto T = V->getType();
958       auto *DestType = T->getPointerElementType()->getPointerTo(DestAS);
959 
960       return getTargetHooks().performAddrSpaceCast(
961           *this, V, AS, LangAS::opencl_generic, DestType, false);
962     };
963 
964     Args.add(RValue::get(CastToGenericAddrSpace(
965                  EmitCastToVoidPtr(Ptr.getPointer()), E->getPtr()->getType())),
966              getContext().VoidPtrTy);
967 
968     std::string LibCallName;
969     QualType LoweredMemTy =
970       MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
971     QualType RetTy;
972     bool HaveRetTy = false;
973     llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
974     switch (E->getOp()) {
975     case AtomicExpr::AO__c11_atomic_init:
976     case AtomicExpr::AO__opencl_atomic_init:
977       llvm_unreachable("Already handled!");
978 
979     // There is only one libcall for compare an exchange, because there is no
980     // optimisation benefit possible from a libcall version of a weak compare
981     // and exchange.
982     // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
983     //                                void *desired, int success, int failure)
984     // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
985     //                                  int success, int failure)
986     case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
987     case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
988     case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
989     case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
990     case AtomicExpr::AO__atomic_compare_exchange:
991     case AtomicExpr::AO__atomic_compare_exchange_n:
992       LibCallName = "__atomic_compare_exchange";
993       RetTy = getContext().BoolTy;
994       HaveRetTy = true;
995       Args.add(
996           RValue::get(CastToGenericAddrSpace(
997               EmitCastToVoidPtr(Val1.getPointer()), E->getVal1()->getType())),
998           getContext().VoidPtrTy);
999       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
1000                         MemTy, E->getExprLoc(), sizeChars);
1001       Args.add(RValue::get(Order), getContext().IntTy);
1002       Order = OrderFail;
1003       break;
1004     // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
1005     //                        int order)
1006     // T __atomic_exchange_N(T *mem, T val, int order)
1007     case AtomicExpr::AO__c11_atomic_exchange:
1008     case AtomicExpr::AO__opencl_atomic_exchange:
1009     case AtomicExpr::AO__atomic_exchange_n:
1010     case AtomicExpr::AO__atomic_exchange:
1011       LibCallName = "__atomic_exchange";
1012       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1013                         MemTy, E->getExprLoc(), sizeChars);
1014       break;
1015     // void __atomic_store(size_t size, void *mem, void *val, int order)
1016     // void __atomic_store_N(T *mem, T val, int order)
1017     case AtomicExpr::AO__c11_atomic_store:
1018     case AtomicExpr::AO__opencl_atomic_store:
1019     case AtomicExpr::AO__atomic_store:
1020     case AtomicExpr::AO__atomic_store_n:
1021       LibCallName = "__atomic_store";
1022       RetTy = getContext().VoidTy;
1023       HaveRetTy = true;
1024       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1025                         MemTy, E->getExprLoc(), sizeChars);
1026       break;
1027     // void __atomic_load(size_t size, void *mem, void *return, int order)
1028     // T __atomic_load_N(T *mem, int order)
1029     case AtomicExpr::AO__c11_atomic_load:
1030     case AtomicExpr::AO__opencl_atomic_load:
1031     case AtomicExpr::AO__atomic_load:
1032     case AtomicExpr::AO__atomic_load_n:
1033       LibCallName = "__atomic_load";
1034       break;
1035     // T __atomic_add_fetch_N(T *mem, T val, int order)
1036     // T __atomic_fetch_add_N(T *mem, T val, int order)
1037     case AtomicExpr::AO__atomic_add_fetch:
1038       PostOp = llvm::Instruction::Add;
1039     // Fall through.
1040     case AtomicExpr::AO__c11_atomic_fetch_add:
1041     case AtomicExpr::AO__opencl_atomic_fetch_add:
1042     case AtomicExpr::AO__atomic_fetch_add:
1043       LibCallName = "__atomic_fetch_add";
1044       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1045                         LoweredMemTy, E->getExprLoc(), sizeChars);
1046       break;
1047     // T __atomic_and_fetch_N(T *mem, T val, int order)
1048     // T __atomic_fetch_and_N(T *mem, T val, int order)
1049     case AtomicExpr::AO__atomic_and_fetch:
1050       PostOp = llvm::Instruction::And;
1051     // Fall through.
1052     case AtomicExpr::AO__c11_atomic_fetch_and:
1053     case AtomicExpr::AO__opencl_atomic_fetch_and:
1054     case AtomicExpr::AO__atomic_fetch_and:
1055       LibCallName = "__atomic_fetch_and";
1056       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1057                         MemTy, E->getExprLoc(), sizeChars);
1058       break;
1059     // T __atomic_or_fetch_N(T *mem, T val, int order)
1060     // T __atomic_fetch_or_N(T *mem, T val, int order)
1061     case AtomicExpr::AO__atomic_or_fetch:
1062       PostOp = llvm::Instruction::Or;
1063     // Fall through.
1064     case AtomicExpr::AO__c11_atomic_fetch_or:
1065     case AtomicExpr::AO__opencl_atomic_fetch_or:
1066     case AtomicExpr::AO__atomic_fetch_or:
1067       LibCallName = "__atomic_fetch_or";
1068       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1069                         MemTy, E->getExprLoc(), sizeChars);
1070       break;
1071     // T __atomic_sub_fetch_N(T *mem, T val, int order)
1072     // T __atomic_fetch_sub_N(T *mem, T val, int order)
1073     case AtomicExpr::AO__atomic_sub_fetch:
1074       PostOp = llvm::Instruction::Sub;
1075     // Fall through.
1076     case AtomicExpr::AO__c11_atomic_fetch_sub:
1077     case AtomicExpr::AO__opencl_atomic_fetch_sub:
1078     case AtomicExpr::AO__atomic_fetch_sub:
1079       LibCallName = "__atomic_fetch_sub";
1080       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1081                         LoweredMemTy, E->getExprLoc(), sizeChars);
1082       break;
1083     // T __atomic_xor_fetch_N(T *mem, T val, int order)
1084     // T __atomic_fetch_xor_N(T *mem, T val, int order)
1085     case AtomicExpr::AO__atomic_xor_fetch:
1086       PostOp = llvm::Instruction::Xor;
1087     // Fall through.
1088     case AtomicExpr::AO__c11_atomic_fetch_xor:
1089     case AtomicExpr::AO__opencl_atomic_fetch_xor:
1090     case AtomicExpr::AO__atomic_fetch_xor:
1091       LibCallName = "__atomic_fetch_xor";
1092       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1093                         MemTy, E->getExprLoc(), sizeChars);
1094       break;
1095     case AtomicExpr::AO__opencl_atomic_fetch_min:
1096       LibCallName = E->getValueType()->isSignedIntegerType()
1097                         ? "__atomic_fetch_min"
1098                         : "__atomic_fetch_umin";
1099       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1100                         LoweredMemTy, E->getExprLoc(), sizeChars);
1101       break;
1102     case AtomicExpr::AO__opencl_atomic_fetch_max:
1103       LibCallName = E->getValueType()->isSignedIntegerType()
1104                         ? "__atomic_fetch_max"
1105                         : "__atomic_fetch_umax";
1106       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1107                         LoweredMemTy, E->getExprLoc(), sizeChars);
1108       break;
1109     // T __atomic_nand_fetch_N(T *mem, T val, int order)
1110     // T __atomic_fetch_nand_N(T *mem, T val, int order)
1111     case AtomicExpr::AO__atomic_nand_fetch:
1112       PostOp = llvm::Instruction::And; // the NOT is special cased below
1113     // Fall through.
1114     case AtomicExpr::AO__atomic_fetch_nand:
1115       LibCallName = "__atomic_fetch_nand";
1116       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1117                         MemTy, E->getExprLoc(), sizeChars);
1118       break;
1119     }
1120 
1121     if (E->isOpenCL()) {
1122       LibCallName = std::string("__opencl") +
1123           StringRef(LibCallName).drop_front(1).str();
1124 
1125     }
1126     // Optimized functions have the size in their name.
1127     if (UseOptimizedLibcall)
1128       LibCallName += "_" + llvm::utostr(Size);
1129     // By default, assume we return a value of the atomic type.
1130     if (!HaveRetTy) {
1131       if (UseOptimizedLibcall) {
1132         // Value is returned directly.
1133         // The function returns an appropriately sized integer type.
1134         RetTy = getContext().getIntTypeForBitwidth(
1135             getContext().toBits(sizeChars), /*Signed=*/false);
1136       } else {
1137         // Value is returned through parameter before the order.
1138         RetTy = getContext().VoidTy;
1139         Args.add(RValue::get(EmitCastToVoidPtr(Dest.getPointer())),
1140                  getContext().VoidPtrTy);
1141       }
1142     }
1143     // order is always the last parameter
1144     Args.add(RValue::get(Order),
1145              getContext().IntTy);
1146     if (E->isOpenCL())
1147       Args.add(RValue::get(Scope), getContext().IntTy);
1148 
1149     // PostOp is only needed for the atomic_*_fetch operations, and
1150     // thus is only needed for and implemented in the
1151     // UseOptimizedLibcall codepath.
1152     assert(UseOptimizedLibcall || !PostOp);
1153 
1154     RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
1155     // The value is returned directly from the libcall.
1156     if (E->isCmpXChg())
1157       return Res;
1158 
1159     // The value is returned directly for optimized libcalls but the expr
1160     // provided an out-param.
1161     if (UseOptimizedLibcall && Res.getScalarVal()) {
1162       llvm::Value *ResVal = Res.getScalarVal();
1163       if (PostOp) {
1164         llvm::Value *LoadVal1 = Args[1].RV.getScalarVal();
1165         ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
1166       }
1167       if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
1168         ResVal = Builder.CreateNot(ResVal);
1169 
1170       Builder.CreateStore(
1171           ResVal,
1172           Builder.CreateBitCast(Dest, ResVal->getType()->getPointerTo()));
1173     }
1174 
1175     if (RValTy->isVoidType())
1176       return RValue::get(nullptr);
1177 
1178     return convertTempToRValue(
1179         Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
1180         RValTy, E->getExprLoc());
1181   }
1182 
1183   bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
1184                  E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
1185                  E->getOp() == AtomicExpr::AO__atomic_store ||
1186                  E->getOp() == AtomicExpr::AO__atomic_store_n;
1187   bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
1188                 E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
1189                 E->getOp() == AtomicExpr::AO__atomic_load ||
1190                 E->getOp() == AtomicExpr::AO__atomic_load_n;
1191 
1192   if (isa<llvm::ConstantInt>(Order)) {
1193     auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
1194     // We should not ever get to a case where the ordering isn't a valid C ABI
1195     // value, but it's hard to enforce that in general.
1196     if (llvm::isValidAtomicOrderingCABI(ord))
1197       switch ((llvm::AtomicOrderingCABI)ord) {
1198       case llvm::AtomicOrderingCABI::relaxed:
1199         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1200                      llvm::AtomicOrdering::Monotonic, Scope);
1201         break;
1202       case llvm::AtomicOrderingCABI::consume:
1203       case llvm::AtomicOrderingCABI::acquire:
1204         if (IsStore)
1205           break; // Avoid crashing on code with undefined behavior
1206         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1207                      llvm::AtomicOrdering::Acquire, Scope);
1208         break;
1209       case llvm::AtomicOrderingCABI::release:
1210         if (IsLoad)
1211           break; // Avoid crashing on code with undefined behavior
1212         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1213                      llvm::AtomicOrdering::Release, Scope);
1214         break;
1215       case llvm::AtomicOrderingCABI::acq_rel:
1216         if (IsLoad || IsStore)
1217           break; // Avoid crashing on code with undefined behavior
1218         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1219                      llvm::AtomicOrdering::AcquireRelease, Scope);
1220         break;
1221       case llvm::AtomicOrderingCABI::seq_cst:
1222         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1223                      llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1224         break;
1225       }
1226     if (RValTy->isVoidType())
1227       return RValue::get(nullptr);
1228 
1229     return convertTempToRValue(
1230         Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
1231         RValTy, E->getExprLoc());
1232   }
1233 
1234   // Long case, when Order isn't obviously constant.
1235 
1236   // Create all the relevant BB's
1237   llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
1238                    *ReleaseBB = nullptr, *AcqRelBB = nullptr,
1239                    *SeqCstBB = nullptr;
1240   MonotonicBB = createBasicBlock("monotonic", CurFn);
1241   if (!IsStore)
1242     AcquireBB = createBasicBlock("acquire", CurFn);
1243   if (!IsLoad)
1244     ReleaseBB = createBasicBlock("release", CurFn);
1245   if (!IsLoad && !IsStore)
1246     AcqRelBB = createBasicBlock("acqrel", CurFn);
1247   SeqCstBB = createBasicBlock("seqcst", CurFn);
1248   llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
1249 
1250   // Create the switch for the split
1251   // MonotonicBB is arbitrarily chosen as the default case; in practice, this
1252   // doesn't matter unless someone is crazy enough to use something that
1253   // doesn't fold to a constant for the ordering.
1254   Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
1255   llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
1256 
1257   // Emit all the different atomics
1258   Builder.SetInsertPoint(MonotonicBB);
1259   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1260                llvm::AtomicOrdering::Monotonic, Scope);
1261   Builder.CreateBr(ContBB);
1262   if (!IsStore) {
1263     Builder.SetInsertPoint(AcquireBB);
1264     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1265                  llvm::AtomicOrdering::Acquire, Scope);
1266     Builder.CreateBr(ContBB);
1267     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
1268                 AcquireBB);
1269     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
1270                 AcquireBB);
1271   }
1272   if (!IsLoad) {
1273     Builder.SetInsertPoint(ReleaseBB);
1274     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1275                  llvm::AtomicOrdering::Release, Scope);
1276     Builder.CreateBr(ContBB);
1277     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
1278                 ReleaseBB);
1279   }
1280   if (!IsLoad && !IsStore) {
1281     Builder.SetInsertPoint(AcqRelBB);
1282     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1283                  llvm::AtomicOrdering::AcquireRelease, Scope);
1284     Builder.CreateBr(ContBB);
1285     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
1286                 AcqRelBB);
1287   }
1288   Builder.SetInsertPoint(SeqCstBB);
1289   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1290                llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1291   Builder.CreateBr(ContBB);
1292   SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
1293               SeqCstBB);
1294 
1295   // Cleanup and return
1296   Builder.SetInsertPoint(ContBB);
1297   if (RValTy->isVoidType())
1298     return RValue::get(nullptr);
1299 
1300   assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
1301   return convertTempToRValue(
1302       Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
1303       RValTy, E->getExprLoc());
1304 }
1305 
1306 Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
1307   unsigned addrspace =
1308     cast<llvm::PointerType>(addr.getPointer()->getType())->getAddressSpace();
1309   llvm::IntegerType *ty =
1310     llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
1311   return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
1312 }
1313 
1314 Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
1315   llvm::Type *Ty = Addr.getElementType();
1316   uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
1317   if (SourceSizeInBits != AtomicSizeInBits) {
1318     Address Tmp = CreateTempAlloca();
1319     CGF.Builder.CreateMemCpy(Tmp, Addr,
1320                              std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
1321     Addr = Tmp;
1322   }
1323 
1324   return emitCastToAtomicIntPointer(Addr);
1325 }
1326 
1327 RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
1328                                              AggValueSlot resultSlot,
1329                                              SourceLocation loc,
1330                                              bool asValue) const {
1331   if (LVal.isSimple()) {
1332     if (EvaluationKind == TEK_Aggregate)
1333       return resultSlot.asRValue();
1334 
1335     // Drill into the padding structure if we have one.
1336     if (hasPadding())
1337       addr = CGF.Builder.CreateStructGEP(addr, 0, CharUnits());
1338 
1339     // Otherwise, just convert the temporary to an r-value using the
1340     // normal conversion routine.
1341     return CGF.convertTempToRValue(addr, getValueType(), loc);
1342   }
1343   if (!asValue)
1344     // Get RValue from temp memory as atomic for non-simple lvalues
1345     return RValue::get(CGF.Builder.CreateLoad(addr));
1346   if (LVal.isBitField())
1347     return CGF.EmitLoadOfBitfieldLValue(
1348         LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
1349                              LVal.getBaseInfo()), loc);
1350   if (LVal.isVectorElt())
1351     return CGF.EmitLoadOfLValue(
1352         LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
1353                               LVal.getBaseInfo()), loc);
1354   assert(LVal.isExtVectorElt());
1355   return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
1356       addr, LVal.getExtVectorElts(), LVal.getType(),
1357       LVal.getBaseInfo()));
1358 }
1359 
1360 RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
1361                                              AggValueSlot ResultSlot,
1362                                              SourceLocation Loc,
1363                                              bool AsValue) const {
1364   // Try not to in some easy cases.
1365   assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
1366   if (getEvaluationKind() == TEK_Scalar &&
1367       (((!LVal.isBitField() ||
1368          LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
1369         !hasPadding()) ||
1370        !AsValue)) {
1371     auto *ValTy = AsValue
1372                       ? CGF.ConvertTypeForMem(ValueTy)
1373                       : getAtomicAddress().getType()->getPointerElementType();
1374     if (ValTy->isIntegerTy()) {
1375       assert(IntVal->getType() == ValTy && "Different integer types.");
1376       return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
1377     } else if (ValTy->isPointerTy())
1378       return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
1379     else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
1380       return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
1381   }
1382 
1383   // Create a temporary.  This needs to be big enough to hold the
1384   // atomic integer.
1385   Address Temp = Address::invalid();
1386   bool TempIsVolatile = false;
1387   if (AsValue && getEvaluationKind() == TEK_Aggregate) {
1388     assert(!ResultSlot.isIgnored());
1389     Temp = ResultSlot.getAddress();
1390     TempIsVolatile = ResultSlot.isVolatile();
1391   } else {
1392     Temp = CreateTempAlloca();
1393   }
1394 
1395   // Slam the integer into the temporary.
1396   Address CastTemp = emitCastToAtomicIntPointer(Temp);
1397   CGF.Builder.CreateStore(IntVal, CastTemp)
1398       ->setVolatile(TempIsVolatile);
1399 
1400   return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
1401 }
1402 
1403 void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
1404                                        llvm::AtomicOrdering AO, bool) {
1405   // void __atomic_load(size_t size, void *mem, void *return, int order);
1406   CallArgList Args;
1407   Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1408   Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
1409            CGF.getContext().VoidPtrTy);
1410   Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddForLoaded)),
1411            CGF.getContext().VoidPtrTy);
1412   Args.add(
1413       RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
1414       CGF.getContext().IntTy);
1415   emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
1416 }
1417 
1418 llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
1419                                           bool IsVolatile) {
1420   // Okay, we're doing this natively.
1421   Address Addr = getAtomicAddressAsAtomicIntPointer();
1422   llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
1423   Load->setAtomic(AO);
1424 
1425   // Other decoration.
1426   if (IsVolatile)
1427     Load->setVolatile(true);
1428   if (LVal.getTBAAInfo())
1429     CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
1430   return Load;
1431 }
1432 
1433 /// An LValue is a candidate for having its loads and stores be made atomic if
1434 /// we are operating under /volatile:ms *and* the LValue itself is volatile and
1435 /// performing such an operation can be performed without a libcall.
1436 bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
1437   if (!CGM.getCodeGenOpts().MSVolatile) return false;
1438   AtomicInfo AI(*this, LV);
1439   bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
1440   // An atomic is inline if we don't need to use a libcall.
1441   bool AtomicIsInline = !AI.shouldUseLibcall();
1442   // MSVC doesn't seem to do this for types wider than a pointer.
1443   if (getContext().getTypeSize(LV.getType()) >
1444       getContext().getTypeSize(getContext().getIntPtrType()))
1445     return false;
1446   return IsVolatile && AtomicIsInline;
1447 }
1448 
1449 RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
1450                                        AggValueSlot Slot) {
1451   llvm::AtomicOrdering AO;
1452   bool IsVolatile = LV.isVolatileQualified();
1453   if (LV.getType()->isAtomicType()) {
1454     AO = llvm::AtomicOrdering::SequentiallyConsistent;
1455   } else {
1456     AO = llvm::AtomicOrdering::Acquire;
1457     IsVolatile = true;
1458   }
1459   return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
1460 }
1461 
1462 RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
1463                                   bool AsValue, llvm::AtomicOrdering AO,
1464                                   bool IsVolatile) {
1465   // Check whether we should use a library call.
1466   if (shouldUseLibcall()) {
1467     Address TempAddr = Address::invalid();
1468     if (LVal.isSimple() && !ResultSlot.isIgnored()) {
1469       assert(getEvaluationKind() == TEK_Aggregate);
1470       TempAddr = ResultSlot.getAddress();
1471     } else
1472       TempAddr = CreateTempAlloca();
1473 
1474     EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);
1475 
1476     // Okay, turn that back into the original value or whole atomic (for
1477     // non-simple lvalues) type.
1478     return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
1479   }
1480 
1481   // Okay, we're doing this natively.
1482   auto *Load = EmitAtomicLoadOp(AO, IsVolatile);
1483 
1484   // If we're ignoring an aggregate return, don't do anything.
1485   if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
1486     return RValue::getAggregate(Address::invalid(), false);
1487 
1488   // Okay, turn that back into the original value or atomic (for non-simple
1489   // lvalues) type.
1490   return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
1491 }
1492 
1493 /// Emit a load from an l-value of atomic type.  Note that the r-value
1494 /// we produce is an r-value of the atomic *value* type.
1495 RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
1496                                        llvm::AtomicOrdering AO, bool IsVolatile,
1497                                        AggValueSlot resultSlot) {
1498   AtomicInfo Atomics(*this, src);
1499   return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
1500                                 IsVolatile);
1501 }
1502 
1503 /// Copy an r-value into memory as part of storing to an atomic type.
1504 /// This needs to create a bit-pattern suitable for atomic operations.
1505 void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
1506   assert(LVal.isSimple());
1507   // If we have an r-value, the rvalue should be of the atomic type,
1508   // which means that the caller is responsible for having zeroed
1509   // any padding.  Just do an aggregate copy of that type.
1510   if (rvalue.isAggregate()) {
1511     CGF.EmitAggregateCopy(getAtomicAddress(),
1512                           rvalue.getAggregateAddress(),
1513                           getAtomicType(),
1514                           (rvalue.isVolatileQualified()
1515                            || LVal.isVolatileQualified()));
1516     return;
1517   }
1518 
1519   // Okay, otherwise we're copying stuff.
1520 
1521   // Zero out the buffer if necessary.
1522   emitMemSetZeroIfNecessary();
1523 
1524   // Drill past the padding if present.
1525   LValue TempLVal = projectValue();
1526 
1527   // Okay, store the rvalue in.
1528   if (rvalue.isScalar()) {
1529     CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
1530   } else {
1531     CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
1532   }
1533 }
1534 
1535 
1536 /// Materialize an r-value into memory for the purposes of storing it
1537 /// to an atomic type.
1538 Address AtomicInfo::materializeRValue(RValue rvalue) const {
1539   // Aggregate r-values are already in memory, and EmitAtomicStore
1540   // requires them to be values of the atomic type.
1541   if (rvalue.isAggregate())
1542     return rvalue.getAggregateAddress();
1543 
1544   // Otherwise, make a temporary and materialize into it.
1545   LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
1546   AtomicInfo Atomics(CGF, TempLV);
1547   Atomics.emitCopyIntoMemory(rvalue);
1548   return TempLV.getAddress();
1549 }
1550 
1551 llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
1552   // If we've got a scalar value of the right size, try to avoid going
1553   // through memory.
1554   if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
1555     llvm::Value *Value = RVal.getScalarVal();
1556     if (isa<llvm::IntegerType>(Value->getType()))
1557       return CGF.EmitToMemory(Value, ValueTy);
1558     else {
1559       llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
1560           CGF.getLLVMContext(),
1561           LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
1562       if (isa<llvm::PointerType>(Value->getType()))
1563         return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
1564       else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
1565         return CGF.Builder.CreateBitCast(Value, InputIntTy);
1566     }
1567   }
1568   // Otherwise, we need to go through memory.
1569   // Put the r-value in memory.
1570   Address Addr = materializeRValue(RVal);
1571 
1572   // Cast the temporary to the atomic int type and pull a value out.
1573   Addr = emitCastToAtomicIntPointer(Addr);
1574   return CGF.Builder.CreateLoad(Addr);
1575 }
1576 
1577 std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
1578     llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
1579     llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
1580   // Do the atomic store.
1581   Address Addr = getAtomicAddressAsAtomicIntPointer();
1582   auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
1583                                                ExpectedVal, DesiredVal,
1584                                                Success, Failure);
1585   // Other decoration.
1586   Inst->setVolatile(LVal.isVolatileQualified());
1587   Inst->setWeak(IsWeak);
1588 
1589   // Okay, turn that back into the original value type.
1590   auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
1591   auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
1592   return std::make_pair(PreviousVal, SuccessFailureVal);
1593 }
1594 
1595 llvm::Value *
1596 AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
1597                                              llvm::Value *DesiredAddr,
1598                                              llvm::AtomicOrdering Success,
1599                                              llvm::AtomicOrdering Failure) {
1600   // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
1601   // void *desired, int success, int failure);
1602   CallArgList Args;
1603   Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1604   Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
1605            CGF.getContext().VoidPtrTy);
1606   Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
1607            CGF.getContext().VoidPtrTy);
1608   Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
1609            CGF.getContext().VoidPtrTy);
1610   Args.add(RValue::get(
1611                llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
1612            CGF.getContext().IntTy);
1613   Args.add(RValue::get(
1614                llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
1615            CGF.getContext().IntTy);
1616   auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
1617                                               CGF.getContext().BoolTy, Args);
1618 
1619   return SuccessFailureRVal.getScalarVal();
1620 }
1621 
1622 std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
1623     RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
1624     llvm::AtomicOrdering Failure, bool IsWeak) {
1625   if (isStrongerThan(Failure, Success))
1626     // Don't assert on undefined behavior "failure argument shall be no stronger
1627     // than the success argument".
1628     Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
1629 
1630   // Check whether we should use a library call.
1631   if (shouldUseLibcall()) {
1632     // Produce a source address.
1633     Address ExpectedAddr = materializeRValue(Expected);
1634     Address DesiredAddr = materializeRValue(Desired);
1635     auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1636                                                  DesiredAddr.getPointer(),
1637                                                  Success, Failure);
1638     return std::make_pair(
1639         convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
1640                                   SourceLocation(), /*AsValue=*/false),
1641         Res);
1642   }
1643 
1644   // If we've got a scalar value of the right size, try to avoid going
1645   // through memory.
1646   auto *ExpectedVal = convertRValueToInt(Expected);
1647   auto *DesiredVal = convertRValueToInt(Desired);
1648   auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
1649                                          Failure, IsWeak);
1650   return std::make_pair(
1651       ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
1652                                 SourceLocation(), /*AsValue=*/false),
1653       Res.second);
1654 }
1655 
1656 static void
1657 EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
1658                       const llvm::function_ref<RValue(RValue)> &UpdateOp,
1659                       Address DesiredAddr) {
1660   RValue UpRVal;
1661   LValue AtomicLVal = Atomics.getAtomicLValue();
1662   LValue DesiredLVal;
1663   if (AtomicLVal.isSimple()) {
1664     UpRVal = OldRVal;
1665     DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
1666   } else {
1667     // Build new lvalue for temp address
1668     Address Ptr = Atomics.materializeRValue(OldRVal);
1669     LValue UpdateLVal;
1670     if (AtomicLVal.isBitField()) {
1671       UpdateLVal =
1672           LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
1673                                AtomicLVal.getType(),
1674                                AtomicLVal.getBaseInfo());
1675       DesiredLVal =
1676           LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1677                                AtomicLVal.getType(),
1678                                AtomicLVal.getBaseInfo());
1679     } else if (AtomicLVal.isVectorElt()) {
1680       UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
1681                                          AtomicLVal.getType(),
1682                                          AtomicLVal.getBaseInfo());
1683       DesiredLVal = LValue::MakeVectorElt(
1684           DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
1685           AtomicLVal.getBaseInfo());
1686     } else {
1687       assert(AtomicLVal.isExtVectorElt());
1688       UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
1689                                             AtomicLVal.getType(),
1690                                             AtomicLVal.getBaseInfo());
1691       DesiredLVal = LValue::MakeExtVectorElt(
1692           DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1693           AtomicLVal.getBaseInfo());
1694     }
1695     UpdateLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
1696     DesiredLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
1697     UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
1698   }
1699   // Store new value in the corresponding memory area
1700   RValue NewRVal = UpdateOp(UpRVal);
1701   if (NewRVal.isScalar()) {
1702     CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
1703   } else {
1704     assert(NewRVal.isComplex());
1705     CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
1706                            /*isInit=*/false);
1707   }
1708 }
1709 
1710 void AtomicInfo::EmitAtomicUpdateLibcall(
1711     llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1712     bool IsVolatile) {
1713   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1714 
1715   Address ExpectedAddr = CreateTempAlloca();
1716 
1717   EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
1718   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1719   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1720   CGF.EmitBlock(ContBB);
1721   Address DesiredAddr = CreateTempAlloca();
1722   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1723       requiresMemSetZero(getAtomicAddress().getElementType())) {
1724     auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1725     CGF.Builder.CreateStore(OldVal, DesiredAddr);
1726   }
1727   auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
1728                                            AggValueSlot::ignored(),
1729                                            SourceLocation(), /*AsValue=*/false);
1730   EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
1731   auto *Res =
1732       EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1733                                        DesiredAddr.getPointer(),
1734                                        AO, Failure);
1735   CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1736   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1737 }
1738 
1739 void AtomicInfo::EmitAtomicUpdateOp(
1740     llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1741     bool IsVolatile) {
1742   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1743 
1744   // Do the atomic load.
1745   auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
1746   // For non-simple lvalues perform compare-and-swap procedure.
1747   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1748   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1749   auto *CurBB = CGF.Builder.GetInsertBlock();
1750   CGF.EmitBlock(ContBB);
1751   llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1752                                              /*NumReservedValues=*/2);
1753   PHI->addIncoming(OldVal, CurBB);
1754   Address NewAtomicAddr = CreateTempAlloca();
1755   Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1756   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1757       requiresMemSetZero(getAtomicAddress().getElementType())) {
1758     CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1759   }
1760   auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
1761                                            SourceLocation(), /*AsValue=*/false);
1762   EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
1763   auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1764   // Try to write new value using cmpxchg operation
1765   auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1766   PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1767   CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1768   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1769 }
1770 
1771 static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
1772                                   RValue UpdateRVal, Address DesiredAddr) {
1773   LValue AtomicLVal = Atomics.getAtomicLValue();
1774   LValue DesiredLVal;
1775   // Build new lvalue for temp address
1776   if (AtomicLVal.isBitField()) {
1777     DesiredLVal =
1778         LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1779                              AtomicLVal.getType(),
1780                              AtomicLVal.getBaseInfo());
1781   } else if (AtomicLVal.isVectorElt()) {
1782     DesiredLVal =
1783         LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
1784                               AtomicLVal.getType(),
1785                               AtomicLVal.getBaseInfo());
1786   } else {
1787     assert(AtomicLVal.isExtVectorElt());
1788     DesiredLVal = LValue::MakeExtVectorElt(
1789         DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1790         AtomicLVal.getBaseInfo());
1791   }
1792   DesiredLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
1793   // Store new value in the corresponding memory area
1794   assert(UpdateRVal.isScalar());
1795   CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
1796 }
1797 
1798 void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
1799                                          RValue UpdateRVal, bool IsVolatile) {
1800   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1801 
1802   Address ExpectedAddr = CreateTempAlloca();
1803 
1804   EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
1805   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1806   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1807   CGF.EmitBlock(ContBB);
1808   Address DesiredAddr = CreateTempAlloca();
1809   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1810       requiresMemSetZero(getAtomicAddress().getElementType())) {
1811     auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1812     CGF.Builder.CreateStore(OldVal, DesiredAddr);
1813   }
1814   EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
1815   auto *Res =
1816       EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1817                                        DesiredAddr.getPointer(),
1818                                        AO, Failure);
1819   CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1820   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1821 }
1822 
1823 void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
1824                                     bool IsVolatile) {
1825   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1826 
1827   // Do the atomic load.
1828   auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
1829   // For non-simple lvalues perform compare-and-swap procedure.
1830   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1831   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1832   auto *CurBB = CGF.Builder.GetInsertBlock();
1833   CGF.EmitBlock(ContBB);
1834   llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1835                                              /*NumReservedValues=*/2);
1836   PHI->addIncoming(OldVal, CurBB);
1837   Address NewAtomicAddr = CreateTempAlloca();
1838   Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1839   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1840       requiresMemSetZero(getAtomicAddress().getElementType())) {
1841     CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1842   }
1843   EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
1844   auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1845   // Try to write new value using cmpxchg operation
1846   auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1847   PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1848   CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1849   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1850 }
1851 
1852 void AtomicInfo::EmitAtomicUpdate(
1853     llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1854     bool IsVolatile) {
1855   if (shouldUseLibcall()) {
1856     EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
1857   } else {
1858     EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
1859   }
1860 }
1861 
1862 void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
1863                                   bool IsVolatile) {
1864   if (shouldUseLibcall()) {
1865     EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
1866   } else {
1867     EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
1868   }
1869 }
1870 
1871 void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
1872                                       bool isInit) {
1873   bool IsVolatile = lvalue.isVolatileQualified();
1874   llvm::AtomicOrdering AO;
1875   if (lvalue.getType()->isAtomicType()) {
1876     AO = llvm::AtomicOrdering::SequentiallyConsistent;
1877   } else {
1878     AO = llvm::AtomicOrdering::Release;
1879     IsVolatile = true;
1880   }
1881   return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
1882 }
1883 
1884 /// Emit a store to an l-value of atomic type.
1885 ///
1886 /// Note that the r-value is expected to be an r-value *of the atomic
1887 /// type*; this means that for aggregate r-values, it should include
1888 /// storage for any padding that was necessary.
1889 void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
1890                                       llvm::AtomicOrdering AO, bool IsVolatile,
1891                                       bool isInit) {
1892   // If this is an aggregate r-value, it should agree in type except
1893   // maybe for address-space qualification.
1894   assert(!rvalue.isAggregate() ||
1895          rvalue.getAggregateAddress().getElementType()
1896            == dest.getAddress().getElementType());
1897 
1898   AtomicInfo atomics(*this, dest);
1899   LValue LVal = atomics.getAtomicLValue();
1900 
1901   // If this is an initialization, just put the value there normally.
1902   if (LVal.isSimple()) {
1903     if (isInit) {
1904       atomics.emitCopyIntoMemory(rvalue);
1905       return;
1906     }
1907 
1908     // Check whether we should use a library call.
1909     if (atomics.shouldUseLibcall()) {
1910       // Produce a source address.
1911       Address srcAddr = atomics.materializeRValue(rvalue);
1912 
1913       // void __atomic_store(size_t size, void *mem, void *val, int order)
1914       CallArgList args;
1915       args.add(RValue::get(atomics.getAtomicSizeValue()),
1916                getContext().getSizeType());
1917       args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())),
1918                getContext().VoidPtrTy);
1919       args.add(RValue::get(EmitCastToVoidPtr(srcAddr.getPointer())),
1920                getContext().VoidPtrTy);
1921       args.add(
1922           RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
1923           getContext().IntTy);
1924       emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
1925       return;
1926     }
1927 
1928     // Okay, we're doing this natively.
1929     llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
1930 
1931     // Do the atomic store.
1932     Address addr =
1933         atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
1934     intValue = Builder.CreateIntCast(
1935         intValue, addr.getElementType(), /*isSigned=*/false);
1936     llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
1937 
1938     // Initializations don't need to be atomic.
1939     if (!isInit)
1940       store->setAtomic(AO);
1941 
1942     // Other decoration.
1943     if (IsVolatile)
1944       store->setVolatile(true);
1945     if (dest.getTBAAInfo())
1946       CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
1947     return;
1948   }
1949 
1950   // Emit simple atomic update operation.
1951   atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
1952 }
1953 
1954 /// Emit a compare-and-exchange op for atomic type.
1955 ///
1956 std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
1957     LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
1958     llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
1959     AggValueSlot Slot) {
1960   // If this is an aggregate r-value, it should agree in type except
1961   // maybe for address-space qualification.
1962   assert(!Expected.isAggregate() ||
1963          Expected.getAggregateAddress().getElementType() ==
1964              Obj.getAddress().getElementType());
1965   assert(!Desired.isAggregate() ||
1966          Desired.getAggregateAddress().getElementType() ==
1967              Obj.getAddress().getElementType());
1968   AtomicInfo Atomics(*this, Obj);
1969 
1970   return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
1971                                            IsWeak);
1972 }
1973 
1974 void CodeGenFunction::EmitAtomicUpdate(
1975     LValue LVal, llvm::AtomicOrdering AO,
1976     const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
1977   AtomicInfo Atomics(*this, LVal);
1978   Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
1979 }
1980 
1981 void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
1982   AtomicInfo atomics(*this, dest);
1983 
1984   switch (atomics.getEvaluationKind()) {
1985   case TEK_Scalar: {
1986     llvm::Value *value = EmitScalarExpr(init);
1987     atomics.emitCopyIntoMemory(RValue::get(value));
1988     return;
1989   }
1990 
1991   case TEK_Complex: {
1992     ComplexPairTy value = EmitComplexExpr(init);
1993     atomics.emitCopyIntoMemory(RValue::getComplex(value));
1994     return;
1995   }
1996 
1997   case TEK_Aggregate: {
1998     // Fix up the destination if the initializer isn't an expression
1999     // of atomic type.
2000     bool Zeroed = false;
2001     if (!init->getType()->isAtomicType()) {
2002       Zeroed = atomics.emitMemSetZeroIfNecessary();
2003       dest = atomics.projectValue();
2004     }
2005 
2006     // Evaluate the expression directly into the destination.
2007     AggValueSlot slot = AggValueSlot::forLValue(dest,
2008                                         AggValueSlot::IsNotDestructed,
2009                                         AggValueSlot::DoesNotNeedGCBarriers,
2010                                         AggValueSlot::IsNotAliased,
2011                                         Zeroed ? AggValueSlot::IsZeroed :
2012                                                  AggValueSlot::IsNotZeroed);
2013 
2014     EmitAggExpr(init, slot);
2015     return;
2016   }
2017   }
2018   llvm_unreachable("bad evaluation kind");
2019 }
2020