1 //===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains the code for emitting atomic operations.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CGCall.h"
15 #include "CGRecordLayout.h"
16 #include "CodeGenFunction.h"
17 #include "CodeGenModule.h"
18 #include "TargetInfo.h"
19 #include "clang/AST/ASTContext.h"
20 #include "clang/CodeGen/CGFunctionInfo.h"
21 #include "llvm/ADT/DenseMap.h"
22 #include "llvm/IR/DataLayout.h"
23 #include "llvm/IR/Intrinsics.h"
24 #include "llvm/IR/Operator.h"
25 
26 using namespace clang;
27 using namespace CodeGen;
28 
29 namespace {
30   class AtomicInfo {
31     CodeGenFunction &CGF;
32     QualType AtomicTy;
33     QualType ValueTy;
34     uint64_t AtomicSizeInBits;
35     uint64_t ValueSizeInBits;
36     CharUnits AtomicAlign;
37     CharUnits ValueAlign;
38     CharUnits LValueAlign;
39     TypeEvaluationKind EvaluationKind;
40     bool UseLibcall;
41     LValue LVal;
42     CGBitFieldInfo BFI;
43   public:
44     AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
45         : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
46           EvaluationKind(TEK_Scalar), UseLibcall(true) {
47       assert(!lvalue.isGlobalReg());
48       ASTContext &C = CGF.getContext();
49       if (lvalue.isSimple()) {
50         AtomicTy = lvalue.getType();
51         if (auto *ATy = AtomicTy->getAs<AtomicType>())
52           ValueTy = ATy->getValueType();
53         else
54           ValueTy = AtomicTy;
55         EvaluationKind = CGF.getEvaluationKind(ValueTy);
56 
57         uint64_t ValueAlignInBits;
58         uint64_t AtomicAlignInBits;
59         TypeInfo ValueTI = C.getTypeInfo(ValueTy);
60         ValueSizeInBits = ValueTI.Width;
61         ValueAlignInBits = ValueTI.Align;
62 
63         TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
64         AtomicSizeInBits = AtomicTI.Width;
65         AtomicAlignInBits = AtomicTI.Align;
66 
67         assert(ValueSizeInBits <= AtomicSizeInBits);
68         assert(ValueAlignInBits <= AtomicAlignInBits);
69 
70         AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
71         ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
72         if (lvalue.getAlignment().isZero())
73           lvalue.setAlignment(AtomicAlign);
74 
75         LVal = lvalue;
76       } else if (lvalue.isBitField()) {
77         ValueTy = lvalue.getType();
78         ValueSizeInBits = C.getTypeSize(ValueTy);
79         auto &OrigBFI = lvalue.getBitFieldInfo();
80         auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
81         AtomicSizeInBits = C.toBits(
82             C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
83                 .alignTo(lvalue.getAlignment()));
84         auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldPointer());
85         auto OffsetInChars =
86             (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
87             lvalue.getAlignment();
88         VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
89             VoidPtrAddr, OffsetInChars.getQuantity());
90         auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
91             VoidPtrAddr,
92             CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
93             "atomic_bitfield_base");
94         BFI = OrigBFI;
95         BFI.Offset = Offset;
96         BFI.StorageSize = AtomicSizeInBits;
97         BFI.StorageOffset += OffsetInChars;
98         LVal = LValue::MakeBitfield(Address(Addr, lvalue.getAlignment()),
99                                     BFI, lvalue.getType(), lvalue.getBaseInfo(),
100                                     lvalue.getTBAAInfo());
101         AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
102         if (AtomicTy.isNull()) {
103           llvm::APInt Size(
104               /*numBits=*/32,
105               C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
106           AtomicTy = C.getConstantArrayType(C.CharTy, Size, ArrayType::Normal,
107                                             /*IndexTypeQuals=*/0);
108         }
109         AtomicAlign = ValueAlign = lvalue.getAlignment();
110       } else if (lvalue.isVectorElt()) {
111         ValueTy = lvalue.getType()->getAs<VectorType>()->getElementType();
112         ValueSizeInBits = C.getTypeSize(ValueTy);
113         AtomicTy = lvalue.getType();
114         AtomicSizeInBits = C.getTypeSize(AtomicTy);
115         AtomicAlign = ValueAlign = lvalue.getAlignment();
116         LVal = lvalue;
117       } else {
118         assert(lvalue.isExtVectorElt());
119         ValueTy = lvalue.getType();
120         ValueSizeInBits = C.getTypeSize(ValueTy);
121         AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
122             lvalue.getType(), lvalue.getExtVectorAddress()
123                                   .getElementType()->getVectorNumElements());
124         AtomicSizeInBits = C.getTypeSize(AtomicTy);
125         AtomicAlign = ValueAlign = lvalue.getAlignment();
126         LVal = lvalue;
127       }
128       UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
129           AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
130     }
131 
132     QualType getAtomicType() const { return AtomicTy; }
133     QualType getValueType() const { return ValueTy; }
134     CharUnits getAtomicAlignment() const { return AtomicAlign; }
135     CharUnits getValueAlignment() const { return ValueAlign; }
136     uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
137     uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
138     TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
139     bool shouldUseLibcall() const { return UseLibcall; }
140     const LValue &getAtomicLValue() const { return LVal; }
141     llvm::Value *getAtomicPointer() const {
142       if (LVal.isSimple())
143         return LVal.getPointer();
144       else if (LVal.isBitField())
145         return LVal.getBitFieldPointer();
146       else if (LVal.isVectorElt())
147         return LVal.getVectorPointer();
148       assert(LVal.isExtVectorElt());
149       return LVal.getExtVectorPointer();
150     }
151     Address getAtomicAddress() const {
152       return Address(getAtomicPointer(), getAtomicAlignment());
153     }
154 
155     Address getAtomicAddressAsAtomicIntPointer() const {
156       return emitCastToAtomicIntPointer(getAtomicAddress());
157     }
158 
159     /// Is the atomic size larger than the underlying value type?
160     ///
161     /// Note that the absence of padding does not mean that atomic
162     /// objects are completely interchangeable with non-atomic
163     /// objects: we might have promoted the alignment of a type
164     /// without making it bigger.
165     bool hasPadding() const {
166       return (ValueSizeInBits != AtomicSizeInBits);
167     }
168 
169     bool emitMemSetZeroIfNecessary() const;
170 
171     llvm::Value *getAtomicSizeValue() const {
172       CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
173       return CGF.CGM.getSize(size);
174     }
175 
176     /// Cast the given pointer to an integer pointer suitable for atomic
177     /// operations if the source.
178     Address emitCastToAtomicIntPointer(Address Addr) const;
179 
180     /// If Addr is compatible with the iN that will be used for an atomic
181     /// operation, bitcast it. Otherwise, create a temporary that is suitable
182     /// and copy the value across.
183     Address convertToAtomicIntPointer(Address Addr) const;
184 
185     /// Turn an atomic-layout object into an r-value.
186     RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
187                                      SourceLocation loc, bool AsValue) const;
188 
189     /// \brief Converts a rvalue to integer value.
190     llvm::Value *convertRValueToInt(RValue RVal) const;
191 
192     RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
193                                      AggValueSlot ResultSlot,
194                                      SourceLocation Loc, bool AsValue) const;
195 
196     /// Copy an atomic r-value into atomic-layout memory.
197     void emitCopyIntoMemory(RValue rvalue) const;
198 
199     /// Project an l-value down to the value field.
200     LValue projectValue() const {
201       assert(LVal.isSimple());
202       Address addr = getAtomicAddress();
203       if (hasPadding())
204         addr = CGF.Builder.CreateStructGEP(addr, 0, CharUnits());
205 
206       return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
207                               LVal.getBaseInfo(), LVal.getTBAAInfo());
208     }
209 
210     /// \brief Emits atomic load.
211     /// \returns Loaded value.
212     RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
213                           bool AsValue, llvm::AtomicOrdering AO,
214                           bool IsVolatile);
215 
216     /// \brief Emits atomic compare-and-exchange sequence.
217     /// \param Expected Expected value.
218     /// \param Desired Desired value.
219     /// \param Success Atomic ordering for success operation.
220     /// \param Failure Atomic ordering for failed operation.
221     /// \param IsWeak true if atomic operation is weak, false otherwise.
222     /// \returns Pair of values: previous value from storage (value type) and
223     /// boolean flag (i1 type) with true if success and false otherwise.
224     std::pair<RValue, llvm::Value *>
225     EmitAtomicCompareExchange(RValue Expected, RValue Desired,
226                               llvm::AtomicOrdering Success =
227                                   llvm::AtomicOrdering::SequentiallyConsistent,
228                               llvm::AtomicOrdering Failure =
229                                   llvm::AtomicOrdering::SequentiallyConsistent,
230                               bool IsWeak = false);
231 
232     /// \brief Emits atomic update.
233     /// \param AO Atomic ordering.
234     /// \param UpdateOp Update operation for the current lvalue.
235     void EmitAtomicUpdate(llvm::AtomicOrdering AO,
236                           const llvm::function_ref<RValue(RValue)> &UpdateOp,
237                           bool IsVolatile);
238     /// \brief Emits atomic update.
239     /// \param AO Atomic ordering.
240     void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
241                           bool IsVolatile);
242 
243     /// Materialize an atomic r-value in atomic-layout memory.
244     Address materializeRValue(RValue rvalue) const;
245 
246     /// \brief Creates temp alloca for intermediate operations on atomic value.
247     Address CreateTempAlloca() const;
248   private:
249     bool requiresMemSetZero(llvm::Type *type) const;
250 
251 
252     /// \brief Emits atomic load as a libcall.
253     void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
254                                llvm::AtomicOrdering AO, bool IsVolatile);
255     /// \brief Emits atomic load as LLVM instruction.
256     llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
257     /// \brief Emits atomic compare-and-exchange op as a libcall.
258     llvm::Value *EmitAtomicCompareExchangeLibcall(
259         llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
260         llvm::AtomicOrdering Success =
261             llvm::AtomicOrdering::SequentiallyConsistent,
262         llvm::AtomicOrdering Failure =
263             llvm::AtomicOrdering::SequentiallyConsistent);
264     /// \brief Emits atomic compare-and-exchange op as LLVM instruction.
265     std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
266         llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
267         llvm::AtomicOrdering Success =
268             llvm::AtomicOrdering::SequentiallyConsistent,
269         llvm::AtomicOrdering Failure =
270             llvm::AtomicOrdering::SequentiallyConsistent,
271         bool IsWeak = false);
272     /// \brief Emit atomic update as libcalls.
273     void
274     EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
275                             const llvm::function_ref<RValue(RValue)> &UpdateOp,
276                             bool IsVolatile);
277     /// \brief Emit atomic update as LLVM instructions.
278     void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
279                             const llvm::function_ref<RValue(RValue)> &UpdateOp,
280                             bool IsVolatile);
281     /// \brief Emit atomic update as libcalls.
282     void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
283                                  bool IsVolatile);
284     /// \brief Emit atomic update as LLVM instructions.
285     void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRal,
286                             bool IsVolatile);
287   };
288 }
289 
290 Address AtomicInfo::CreateTempAlloca() const {
291   Address TempAlloca = CGF.CreateMemTemp(
292       (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
293                                                                 : AtomicTy,
294       getAtomicAlignment(),
295       "atomic-temp");
296   // Cast to pointer to value type for bitfields.
297   if (LVal.isBitField())
298     return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
299         TempAlloca, getAtomicAddress().getType());
300   return TempAlloca;
301 }
302 
303 static RValue emitAtomicLibcall(CodeGenFunction &CGF,
304                                 StringRef fnName,
305                                 QualType resultType,
306                                 CallArgList &args) {
307   const CGFunctionInfo &fnInfo =
308     CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
309   llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
310   llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
311   auto callee = CGCallee::forDirect(fn);
312   return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
313 }
314 
315 /// Does a store of the given IR type modify the full expected width?
316 static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
317                            uint64_t expectedSize) {
318   return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
319 }
320 
321 /// Does the atomic type require memsetting to zero before initialization?
322 ///
323 /// The IR type is provided as a way of making certain queries faster.
324 bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
325   // If the atomic type has size padding, we definitely need a memset.
326   if (hasPadding()) return true;
327 
328   // Otherwise, do some simple heuristics to try to avoid it:
329   switch (getEvaluationKind()) {
330   // For scalars and complexes, check whether the store size of the
331   // type uses the full size.
332   case TEK_Scalar:
333     return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
334   case TEK_Complex:
335     return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
336                            AtomicSizeInBits / 2);
337 
338   // Padding in structs has an undefined bit pattern.  User beware.
339   case TEK_Aggregate:
340     return false;
341   }
342   llvm_unreachable("bad evaluation kind");
343 }
344 
345 bool AtomicInfo::emitMemSetZeroIfNecessary() const {
346   assert(LVal.isSimple());
347   llvm::Value *addr = LVal.getPointer();
348   if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
349     return false;
350 
351   CGF.Builder.CreateMemSet(
352       addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
353       CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
354       LVal.getAlignment().getQuantity());
355   return true;
356 }
357 
358 static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
359                               Address Dest, Address Ptr,
360                               Address Val1, Address Val2,
361                               uint64_t Size,
362                               llvm::AtomicOrdering SuccessOrder,
363                               llvm::AtomicOrdering FailureOrder,
364                               llvm::SyncScope::ID Scope) {
365   // Note that cmpxchg doesn't support weak cmpxchg, at least at the moment.
366   llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
367   llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);
368 
369   llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
370       Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder,
371       Scope);
372   Pair->setVolatile(E->isVolatile());
373   Pair->setWeak(IsWeak);
374 
375   // Cmp holds the result of the compare-exchange operation: true on success,
376   // false on failure.
377   llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
378   llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);
379 
380   // This basic block is used to hold the store instruction if the operation
381   // failed.
382   llvm::BasicBlock *StoreExpectedBB =
383       CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
384 
385   // This basic block is the exit point of the operation, we should end up
386   // here regardless of whether or not the operation succeeded.
387   llvm::BasicBlock *ContinueBB =
388       CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
389 
390   // Update Expected if Expected isn't equal to Old, otherwise branch to the
391   // exit point.
392   CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);
393 
394   CGF.Builder.SetInsertPoint(StoreExpectedBB);
395   // Update the memory at Expected with Old's value.
396   CGF.Builder.CreateStore(Old, Val1);
397   // Finally, branch to the exit point.
398   CGF.Builder.CreateBr(ContinueBB);
399 
400   CGF.Builder.SetInsertPoint(ContinueBB);
401   // Update the memory at Dest with Cmp's value.
402   CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
403 }
404 
405 /// Given an ordering required on success, emit all possible cmpxchg
406 /// instructions to cope with the provided (but possibly only dynamically known)
407 /// FailureOrder.
408 static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
409                                         bool IsWeak, Address Dest, Address Ptr,
410                                         Address Val1, Address Val2,
411                                         llvm::Value *FailureOrderVal,
412                                         uint64_t Size,
413                                         llvm::AtomicOrdering SuccessOrder,
414                                         llvm::SyncScope::ID Scope) {
415   llvm::AtomicOrdering FailureOrder;
416   if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
417     auto FOS = FO->getSExtValue();
418     if (!llvm::isValidAtomicOrderingCABI(FOS))
419       FailureOrder = llvm::AtomicOrdering::Monotonic;
420     else
421       switch ((llvm::AtomicOrderingCABI)FOS) {
422       case llvm::AtomicOrderingCABI::relaxed:
423       case llvm::AtomicOrderingCABI::release:
424       case llvm::AtomicOrderingCABI::acq_rel:
425         FailureOrder = llvm::AtomicOrdering::Monotonic;
426         break;
427       case llvm::AtomicOrderingCABI::consume:
428       case llvm::AtomicOrderingCABI::acquire:
429         FailureOrder = llvm::AtomicOrdering::Acquire;
430         break;
431       case llvm::AtomicOrderingCABI::seq_cst:
432         FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
433         break;
434       }
435     if (isStrongerThan(FailureOrder, SuccessOrder)) {
436       // Don't assert on undefined behavior "failure argument shall be no
437       // stronger than the success argument".
438       FailureOrder =
439           llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
440     }
441     emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
442                       FailureOrder, Scope);
443     return;
444   }
445 
446   // Create all the relevant BB's
447   llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
448                    *SeqCstBB = nullptr;
449   MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
450   if (SuccessOrder != llvm::AtomicOrdering::Monotonic &&
451       SuccessOrder != llvm::AtomicOrdering::Release)
452     AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
453   if (SuccessOrder == llvm::AtomicOrdering::SequentiallyConsistent)
454     SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
455 
456   llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);
457 
458   llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
459 
460   // Emit all the different atomics
461 
462   // MonotonicBB is arbitrarily chosen as the default case; in practice, this
463   // doesn't matter unless someone is crazy enough to use something that
464   // doesn't fold to a constant for the ordering.
465   CGF.Builder.SetInsertPoint(MonotonicBB);
466   emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
467                     Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
468   CGF.Builder.CreateBr(ContBB);
469 
470   if (AcquireBB) {
471     CGF.Builder.SetInsertPoint(AcquireBB);
472     emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
473                       Size, SuccessOrder, llvm::AtomicOrdering::Acquire, Scope);
474     CGF.Builder.CreateBr(ContBB);
475     SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
476                 AcquireBB);
477     SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
478                 AcquireBB);
479   }
480   if (SeqCstBB) {
481     CGF.Builder.SetInsertPoint(SeqCstBB);
482     emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
483                       llvm::AtomicOrdering::SequentiallyConsistent, Scope);
484     CGF.Builder.CreateBr(ContBB);
485     SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
486                 SeqCstBB);
487   }
488 
489   CGF.Builder.SetInsertPoint(ContBB);
490 }
491 
492 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
493                          Address Ptr, Address Val1, Address Val2,
494                          llvm::Value *IsWeak, llvm::Value *FailureOrder,
495                          uint64_t Size, llvm::AtomicOrdering Order,
496                          llvm::SyncScope::ID Scope) {
497   llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
498   llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
499 
500   switch (E->getOp()) {
501   case AtomicExpr::AO__c11_atomic_init:
502   case AtomicExpr::AO__opencl_atomic_init:
503     llvm_unreachable("Already handled!");
504 
505   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
506   case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
507     emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
508                                 FailureOrder, Size, Order, Scope);
509     return;
510   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
511   case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
512     emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
513                                 FailureOrder, Size, Order, Scope);
514     return;
515   case AtomicExpr::AO__atomic_compare_exchange:
516   case AtomicExpr::AO__atomic_compare_exchange_n: {
517     if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
518       emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
519                                   Val1, Val2, FailureOrder, Size, Order, Scope);
520     } else {
521       // Create all the relevant BB's
522       llvm::BasicBlock *StrongBB =
523           CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
524       llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmxchg.weak", CGF.CurFn);
525       llvm::BasicBlock *ContBB =
526           CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
527 
528       llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
529       SI->addCase(CGF.Builder.getInt1(false), StrongBB);
530 
531       CGF.Builder.SetInsertPoint(StrongBB);
532       emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
533                                   FailureOrder, Size, Order, Scope);
534       CGF.Builder.CreateBr(ContBB);
535 
536       CGF.Builder.SetInsertPoint(WeakBB);
537       emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
538                                   FailureOrder, Size, Order, Scope);
539       CGF.Builder.CreateBr(ContBB);
540 
541       CGF.Builder.SetInsertPoint(ContBB);
542     }
543     return;
544   }
545   case AtomicExpr::AO__c11_atomic_load:
546   case AtomicExpr::AO__opencl_atomic_load:
547   case AtomicExpr::AO__atomic_load_n:
548   case AtomicExpr::AO__atomic_load: {
549     llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
550     Load->setAtomic(Order, Scope);
551     Load->setVolatile(E->isVolatile());
552     CGF.Builder.CreateStore(Load, Dest);
553     return;
554   }
555 
556   case AtomicExpr::AO__c11_atomic_store:
557   case AtomicExpr::AO__opencl_atomic_store:
558   case AtomicExpr::AO__atomic_store:
559   case AtomicExpr::AO__atomic_store_n: {
560     llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
561     llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
562     Store->setAtomic(Order, Scope);
563     Store->setVolatile(E->isVolatile());
564     return;
565   }
566 
567   case AtomicExpr::AO__c11_atomic_exchange:
568   case AtomicExpr::AO__opencl_atomic_exchange:
569   case AtomicExpr::AO__atomic_exchange_n:
570   case AtomicExpr::AO__atomic_exchange:
571     Op = llvm::AtomicRMWInst::Xchg;
572     break;
573 
574   case AtomicExpr::AO__atomic_add_fetch:
575     PostOp = llvm::Instruction::Add;
576     LLVM_FALLTHROUGH;
577   case AtomicExpr::AO__c11_atomic_fetch_add:
578   case AtomicExpr::AO__opencl_atomic_fetch_add:
579   case AtomicExpr::AO__atomic_fetch_add:
580     Op = llvm::AtomicRMWInst::Add;
581     break;
582 
583   case AtomicExpr::AO__atomic_sub_fetch:
584     PostOp = llvm::Instruction::Sub;
585     LLVM_FALLTHROUGH;
586   case AtomicExpr::AO__c11_atomic_fetch_sub:
587   case AtomicExpr::AO__opencl_atomic_fetch_sub:
588   case AtomicExpr::AO__atomic_fetch_sub:
589     Op = llvm::AtomicRMWInst::Sub;
590     break;
591 
592   case AtomicExpr::AO__opencl_atomic_fetch_min:
593     Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min
594                                                   : llvm::AtomicRMWInst::UMin;
595     break;
596 
597   case AtomicExpr::AO__opencl_atomic_fetch_max:
598     Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max
599                                                   : llvm::AtomicRMWInst::UMax;
600     break;
601 
602   case AtomicExpr::AO__atomic_and_fetch:
603     PostOp = llvm::Instruction::And;
604     LLVM_FALLTHROUGH;
605   case AtomicExpr::AO__c11_atomic_fetch_and:
606   case AtomicExpr::AO__opencl_atomic_fetch_and:
607   case AtomicExpr::AO__atomic_fetch_and:
608     Op = llvm::AtomicRMWInst::And;
609     break;
610 
611   case AtomicExpr::AO__atomic_or_fetch:
612     PostOp = llvm::Instruction::Or;
613     LLVM_FALLTHROUGH;
614   case AtomicExpr::AO__c11_atomic_fetch_or:
615   case AtomicExpr::AO__opencl_atomic_fetch_or:
616   case AtomicExpr::AO__atomic_fetch_or:
617     Op = llvm::AtomicRMWInst::Or;
618     break;
619 
620   case AtomicExpr::AO__atomic_xor_fetch:
621     PostOp = llvm::Instruction::Xor;
622     LLVM_FALLTHROUGH;
623   case AtomicExpr::AO__c11_atomic_fetch_xor:
624   case AtomicExpr::AO__opencl_atomic_fetch_xor:
625   case AtomicExpr::AO__atomic_fetch_xor:
626     Op = llvm::AtomicRMWInst::Xor;
627     break;
628 
629   case AtomicExpr::AO__atomic_nand_fetch:
630     PostOp = llvm::Instruction::And; // the NOT is special cased below
631     LLVM_FALLTHROUGH;
632   case AtomicExpr::AO__atomic_fetch_nand:
633     Op = llvm::AtomicRMWInst::Nand;
634     break;
635   }
636 
637   llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
638   llvm::AtomicRMWInst *RMWI =
639       CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order, Scope);
640   RMWI->setVolatile(E->isVolatile());
641 
642   // For __atomic_*_fetch operations, perform the operation again to
643   // determine the value which was written.
644   llvm::Value *Result = RMWI;
645   if (PostOp)
646     Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
647   if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
648     Result = CGF.Builder.CreateNot(Result);
649   CGF.Builder.CreateStore(Result, Dest);
650 }
651 
652 // This function emits any expression (scalar, complex, or aggregate)
653 // into a temporary alloca.
654 static Address
655 EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
656   Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
657   CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
658                        /*Init*/ true);
659   return DeclPtr;
660 }
661 
662 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
663                          Address Ptr, Address Val1, Address Val2,
664                          llvm::Value *IsWeak, llvm::Value *FailureOrder,
665                          uint64_t Size, llvm::AtomicOrdering Order,
666                          llvm::Value *Scope) {
667   auto ScopeModel = Expr->getScopeModel();
668 
669   // LLVM atomic instructions always have synch scope. If clang atomic
670   // expression has no scope operand, use default LLVM synch scope.
671   if (!ScopeModel) {
672     EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
673                  Order, CGF.CGM.getLLVMContext().getOrInsertSyncScopeID(""));
674     return;
675   }
676 
677   // Handle constant scope.
678   if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
679     auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
680         ScopeModel->map(SC->getZExtValue()), CGF.CGM.getLLVMContext());
681     EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
682                  Order, SCID);
683     return;
684   }
685 
686   // Handle non-constant scope.
687   auto &Builder = CGF.Builder;
688   auto Scopes = ScopeModel->getRuntimeValues();
689   llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
690   for (auto S : Scopes)
691     BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);
692 
693   llvm::BasicBlock *ContBB =
694       CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);
695 
696   auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
697   // If unsupported synch scope is encountered at run time, assume a fallback
698   // synch scope value.
699   auto FallBack = ScopeModel->getFallBackValue();
700   llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
701   for (auto S : Scopes) {
702     auto *B = BB[S];
703     if (S != FallBack)
704       SI->addCase(Builder.getInt32(S), B);
705 
706     Builder.SetInsertPoint(B);
707     EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
708                  Order,
709                  CGF.getTargetHooks().getLLVMSyncScopeID(ScopeModel->map(S),
710                                                          CGF.getLLVMContext()));
711     Builder.CreateBr(ContBB);
712   }
713 
714   Builder.SetInsertPoint(ContBB);
715 }
716 
717 static void
718 AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
719                   bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
720                   SourceLocation Loc, CharUnits SizeInChars) {
721   if (UseOptimizedLibcall) {
722     // Load value and pass it to the function directly.
723     CharUnits Align = CGF.getContext().getTypeAlignInChars(ValTy);
724     int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
725     ValTy =
726         CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
727     llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
728                                                 SizeInBits)->getPointerTo();
729     Address Ptr = Address(CGF.Builder.CreateBitCast(Val, IPtrTy), Align);
730     Val = CGF.EmitLoadOfScalar(Ptr, false,
731                                CGF.getContext().getPointerType(ValTy),
732                                Loc);
733     // Coerce the value into an appropriately sized integer type.
734     Args.add(RValue::get(Val), ValTy);
735   } else {
736     // Non-optimized functions always take a reference.
737     Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
738                          CGF.getContext().VoidPtrTy);
739   }
740 }
741 
742 RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
743   QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
744   QualType MemTy = AtomicTy;
745   if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
746     MemTy = AT->getValueType();
747   llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;
748 
749   Address Val1 = Address::invalid();
750   Address Val2 = Address::invalid();
751   Address Dest = Address::invalid();
752   Address Ptr = EmitPointerWithAlignment(E->getPtr());
753 
754   CharUnits sizeChars, alignChars;
755   std::tie(sizeChars, alignChars) = getContext().getTypeInfoInChars(AtomicTy);
756   uint64_t Size = sizeChars.getQuantity();
757   unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
758   bool UseLibcall = ((Ptr.getAlignment() % sizeChars) != 0 ||
759                      getContext().toBits(sizeChars) > MaxInlineWidthInBits);
760 
761   if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
762       E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
763     LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
764     EmitAtomicInit(E->getVal1(), lvalue);
765     return RValue::get(nullptr);
766   }
767 
768   llvm::Value *Order = EmitScalarExpr(E->getOrder());
769   llvm::Value *Scope =
770       E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;
771 
772   switch (E->getOp()) {
773   case AtomicExpr::AO__c11_atomic_init:
774   case AtomicExpr::AO__opencl_atomic_init:
775     llvm_unreachable("Already handled above with EmitAtomicInit!");
776 
777   case AtomicExpr::AO__c11_atomic_load:
778   case AtomicExpr::AO__opencl_atomic_load:
779   case AtomicExpr::AO__atomic_load_n:
780     break;
781 
782   case AtomicExpr::AO__atomic_load:
783     Dest = EmitPointerWithAlignment(E->getVal1());
784     break;
785 
786   case AtomicExpr::AO__atomic_store:
787     Val1 = EmitPointerWithAlignment(E->getVal1());
788     break;
789 
790   case AtomicExpr::AO__atomic_exchange:
791     Val1 = EmitPointerWithAlignment(E->getVal1());
792     Dest = EmitPointerWithAlignment(E->getVal2());
793     break;
794 
795   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
796   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
797   case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
798   case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
799   case AtomicExpr::AO__atomic_compare_exchange_n:
800   case AtomicExpr::AO__atomic_compare_exchange:
801     Val1 = EmitPointerWithAlignment(E->getVal1());
802     if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
803       Val2 = EmitPointerWithAlignment(E->getVal2());
804     else
805       Val2 = EmitValToTemp(*this, E->getVal2());
806     OrderFail = EmitScalarExpr(E->getOrderFail());
807     if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
808         E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
809       IsWeak = EmitScalarExpr(E->getWeak());
810     break;
811 
812   case AtomicExpr::AO__c11_atomic_fetch_add:
813   case AtomicExpr::AO__c11_atomic_fetch_sub:
814   case AtomicExpr::AO__opencl_atomic_fetch_add:
815   case AtomicExpr::AO__opencl_atomic_fetch_sub:
816     if (MemTy->isPointerType()) {
817       // For pointer arithmetic, we're required to do a bit of math:
818       // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
819       // ... but only for the C11 builtins. The GNU builtins expect the
820       // user to multiply by sizeof(T).
821       QualType Val1Ty = E->getVal1()->getType();
822       llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
823       CharUnits PointeeIncAmt =
824           getContext().getTypeSizeInChars(MemTy->getPointeeType());
825       Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
826       auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
827       Val1 = Temp;
828       EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
829       break;
830     }
831       LLVM_FALLTHROUGH;
832   case AtomicExpr::AO__atomic_fetch_add:
833   case AtomicExpr::AO__atomic_fetch_sub:
834   case AtomicExpr::AO__atomic_add_fetch:
835   case AtomicExpr::AO__atomic_sub_fetch:
836   case AtomicExpr::AO__c11_atomic_store:
837   case AtomicExpr::AO__c11_atomic_exchange:
838   case AtomicExpr::AO__opencl_atomic_store:
839   case AtomicExpr::AO__opencl_atomic_exchange:
840   case AtomicExpr::AO__atomic_store_n:
841   case AtomicExpr::AO__atomic_exchange_n:
842   case AtomicExpr::AO__c11_atomic_fetch_and:
843   case AtomicExpr::AO__c11_atomic_fetch_or:
844   case AtomicExpr::AO__c11_atomic_fetch_xor:
845   case AtomicExpr::AO__opencl_atomic_fetch_and:
846   case AtomicExpr::AO__opencl_atomic_fetch_or:
847   case AtomicExpr::AO__opencl_atomic_fetch_xor:
848   case AtomicExpr::AO__opencl_atomic_fetch_min:
849   case AtomicExpr::AO__opencl_atomic_fetch_max:
850   case AtomicExpr::AO__atomic_fetch_and:
851   case AtomicExpr::AO__atomic_fetch_or:
852   case AtomicExpr::AO__atomic_fetch_xor:
853   case AtomicExpr::AO__atomic_fetch_nand:
854   case AtomicExpr::AO__atomic_and_fetch:
855   case AtomicExpr::AO__atomic_or_fetch:
856   case AtomicExpr::AO__atomic_xor_fetch:
857   case AtomicExpr::AO__atomic_nand_fetch:
858     Val1 = EmitValToTemp(*this, E->getVal1());
859     break;
860   }
861 
862   QualType RValTy = E->getType().getUnqualifiedType();
863 
864   // The inlined atomics only function on iN types, where N is a power of 2. We
865   // need to make sure (via temporaries if necessary) that all incoming values
866   // are compatible.
867   LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
868   AtomicInfo Atomics(*this, AtomicVal);
869 
870   Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
871   if (Val1.isValid()) Val1 = Atomics.convertToAtomicIntPointer(Val1);
872   if (Val2.isValid()) Val2 = Atomics.convertToAtomicIntPointer(Val2);
873   if (Dest.isValid())
874     Dest = Atomics.emitCastToAtomicIntPointer(Dest);
875   else if (E->isCmpXChg())
876     Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
877   else if (!RValTy->isVoidType())
878     Dest = Atomics.emitCastToAtomicIntPointer(Atomics.CreateTempAlloca());
879 
880   // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
881   if (UseLibcall) {
882     bool UseOptimizedLibcall = false;
883     switch (E->getOp()) {
884     case AtomicExpr::AO__c11_atomic_init:
885     case AtomicExpr::AO__opencl_atomic_init:
886       llvm_unreachable("Already handled above with EmitAtomicInit!");
887 
888     case AtomicExpr::AO__c11_atomic_fetch_add:
889     case AtomicExpr::AO__opencl_atomic_fetch_add:
890     case AtomicExpr::AO__atomic_fetch_add:
891     case AtomicExpr::AO__c11_atomic_fetch_and:
892     case AtomicExpr::AO__opencl_atomic_fetch_and:
893     case AtomicExpr::AO__atomic_fetch_and:
894     case AtomicExpr::AO__c11_atomic_fetch_or:
895     case AtomicExpr::AO__opencl_atomic_fetch_or:
896     case AtomicExpr::AO__atomic_fetch_or:
897     case AtomicExpr::AO__atomic_fetch_nand:
898     case AtomicExpr::AO__c11_atomic_fetch_sub:
899     case AtomicExpr::AO__opencl_atomic_fetch_sub:
900     case AtomicExpr::AO__atomic_fetch_sub:
901     case AtomicExpr::AO__c11_atomic_fetch_xor:
902     case AtomicExpr::AO__opencl_atomic_fetch_xor:
903     case AtomicExpr::AO__opencl_atomic_fetch_min:
904     case AtomicExpr::AO__opencl_atomic_fetch_max:
905     case AtomicExpr::AO__atomic_fetch_xor:
906     case AtomicExpr::AO__atomic_add_fetch:
907     case AtomicExpr::AO__atomic_and_fetch:
908     case AtomicExpr::AO__atomic_nand_fetch:
909     case AtomicExpr::AO__atomic_or_fetch:
910     case AtomicExpr::AO__atomic_sub_fetch:
911     case AtomicExpr::AO__atomic_xor_fetch:
912       // For these, only library calls for certain sizes exist.
913       UseOptimizedLibcall = true;
914       break;
915 
916     case AtomicExpr::AO__c11_atomic_load:
917     case AtomicExpr::AO__c11_atomic_store:
918     case AtomicExpr::AO__c11_atomic_exchange:
919     case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
920     case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
921     case AtomicExpr::AO__opencl_atomic_load:
922     case AtomicExpr::AO__opencl_atomic_store:
923     case AtomicExpr::AO__opencl_atomic_exchange:
924     case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
925     case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
926     case AtomicExpr::AO__atomic_load_n:
927     case AtomicExpr::AO__atomic_load:
928     case AtomicExpr::AO__atomic_store_n:
929     case AtomicExpr::AO__atomic_store:
930     case AtomicExpr::AO__atomic_exchange_n:
931     case AtomicExpr::AO__atomic_exchange:
932     case AtomicExpr::AO__atomic_compare_exchange_n:
933     case AtomicExpr::AO__atomic_compare_exchange:
934       // Only use optimized library calls for sizes for which they exist.
935       if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
936         UseOptimizedLibcall = true;
937       break;
938     }
939 
940     CallArgList Args;
941     if (!UseOptimizedLibcall) {
942       // For non-optimized library calls, the size is the first parameter
943       Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
944                getContext().getSizeType());
945     }
946     // Atomic address is the first or second parameter
947     // The OpenCL atomic library functions only accept pointer arguments to
948     // generic address space.
949     auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
950       if (!E->isOpenCL())
951         return V;
952       auto AS = PT->getAs<PointerType>()->getPointeeType().getAddressSpace();
953       if (AS == LangAS::opencl_generic)
954         return V;
955       auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
956       auto T = V->getType();
957       auto *DestType = T->getPointerElementType()->getPointerTo(DestAS);
958 
959       return getTargetHooks().performAddrSpaceCast(
960           *this, V, AS, LangAS::opencl_generic, DestType, false);
961     };
962 
963     Args.add(RValue::get(CastToGenericAddrSpace(
964                  EmitCastToVoidPtr(Ptr.getPointer()), E->getPtr()->getType())),
965              getContext().VoidPtrTy);
966 
967     std::string LibCallName;
968     QualType LoweredMemTy =
969       MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
970     QualType RetTy;
971     bool HaveRetTy = false;
972     llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
973     switch (E->getOp()) {
974     case AtomicExpr::AO__c11_atomic_init:
975     case AtomicExpr::AO__opencl_atomic_init:
976       llvm_unreachable("Already handled!");
977 
978     // There is only one libcall for compare an exchange, because there is no
979     // optimisation benefit possible from a libcall version of a weak compare
980     // and exchange.
981     // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
982     //                                void *desired, int success, int failure)
983     // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
984     //                                  int success, int failure)
985     case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
986     case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
987     case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
988     case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
989     case AtomicExpr::AO__atomic_compare_exchange:
990     case AtomicExpr::AO__atomic_compare_exchange_n:
991       LibCallName = "__atomic_compare_exchange";
992       RetTy = getContext().BoolTy;
993       HaveRetTy = true;
994       Args.add(
995           RValue::get(CastToGenericAddrSpace(
996               EmitCastToVoidPtr(Val1.getPointer()), E->getVal1()->getType())),
997           getContext().VoidPtrTy);
998       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
999                         MemTy, E->getExprLoc(), sizeChars);
1000       Args.add(RValue::get(Order), getContext().IntTy);
1001       Order = OrderFail;
1002       break;
1003     // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
1004     //                        int order)
1005     // T __atomic_exchange_N(T *mem, T val, int order)
1006     case AtomicExpr::AO__c11_atomic_exchange:
1007     case AtomicExpr::AO__opencl_atomic_exchange:
1008     case AtomicExpr::AO__atomic_exchange_n:
1009     case AtomicExpr::AO__atomic_exchange:
1010       LibCallName = "__atomic_exchange";
1011       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1012                         MemTy, E->getExprLoc(), sizeChars);
1013       break;
1014     // void __atomic_store(size_t size, void *mem, void *val, int order)
1015     // void __atomic_store_N(T *mem, T val, int order)
1016     case AtomicExpr::AO__c11_atomic_store:
1017     case AtomicExpr::AO__opencl_atomic_store:
1018     case AtomicExpr::AO__atomic_store:
1019     case AtomicExpr::AO__atomic_store_n:
1020       LibCallName = "__atomic_store";
1021       RetTy = getContext().VoidTy;
1022       HaveRetTy = true;
1023       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1024                         MemTy, E->getExprLoc(), sizeChars);
1025       break;
1026     // void __atomic_load(size_t size, void *mem, void *return, int order)
1027     // T __atomic_load_N(T *mem, int order)
1028     case AtomicExpr::AO__c11_atomic_load:
1029     case AtomicExpr::AO__opencl_atomic_load:
1030     case AtomicExpr::AO__atomic_load:
1031     case AtomicExpr::AO__atomic_load_n:
1032       LibCallName = "__atomic_load";
1033       break;
1034     // T __atomic_add_fetch_N(T *mem, T val, int order)
1035     // T __atomic_fetch_add_N(T *mem, T val, int order)
1036     case AtomicExpr::AO__atomic_add_fetch:
1037       PostOp = llvm::Instruction::Add;
1038       LLVM_FALLTHROUGH;
1039     case AtomicExpr::AO__c11_atomic_fetch_add:
1040     case AtomicExpr::AO__opencl_atomic_fetch_add:
1041     case AtomicExpr::AO__atomic_fetch_add:
1042       LibCallName = "__atomic_fetch_add";
1043       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1044                         LoweredMemTy, E->getExprLoc(), sizeChars);
1045       break;
1046     // T __atomic_and_fetch_N(T *mem, T val, int order)
1047     // T __atomic_fetch_and_N(T *mem, T val, int order)
1048     case AtomicExpr::AO__atomic_and_fetch:
1049       PostOp = llvm::Instruction::And;
1050       LLVM_FALLTHROUGH;
1051     case AtomicExpr::AO__c11_atomic_fetch_and:
1052     case AtomicExpr::AO__opencl_atomic_fetch_and:
1053     case AtomicExpr::AO__atomic_fetch_and:
1054       LibCallName = "__atomic_fetch_and";
1055       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1056                         MemTy, E->getExprLoc(), sizeChars);
1057       break;
1058     // T __atomic_or_fetch_N(T *mem, T val, int order)
1059     // T __atomic_fetch_or_N(T *mem, T val, int order)
1060     case AtomicExpr::AO__atomic_or_fetch:
1061       PostOp = llvm::Instruction::Or;
1062       LLVM_FALLTHROUGH;
1063     case AtomicExpr::AO__c11_atomic_fetch_or:
1064     case AtomicExpr::AO__opencl_atomic_fetch_or:
1065     case AtomicExpr::AO__atomic_fetch_or:
1066       LibCallName = "__atomic_fetch_or";
1067       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1068                         MemTy, E->getExprLoc(), sizeChars);
1069       break;
1070     // T __atomic_sub_fetch_N(T *mem, T val, int order)
1071     // T __atomic_fetch_sub_N(T *mem, T val, int order)
1072     case AtomicExpr::AO__atomic_sub_fetch:
1073       PostOp = llvm::Instruction::Sub;
1074       LLVM_FALLTHROUGH;
1075     case AtomicExpr::AO__c11_atomic_fetch_sub:
1076     case AtomicExpr::AO__opencl_atomic_fetch_sub:
1077     case AtomicExpr::AO__atomic_fetch_sub:
1078       LibCallName = "__atomic_fetch_sub";
1079       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1080                         LoweredMemTy, E->getExprLoc(), sizeChars);
1081       break;
1082     // T __atomic_xor_fetch_N(T *mem, T val, int order)
1083     // T __atomic_fetch_xor_N(T *mem, T val, int order)
1084     case AtomicExpr::AO__atomic_xor_fetch:
1085       PostOp = llvm::Instruction::Xor;
1086       LLVM_FALLTHROUGH;
1087     case AtomicExpr::AO__c11_atomic_fetch_xor:
1088     case AtomicExpr::AO__opencl_atomic_fetch_xor:
1089     case AtomicExpr::AO__atomic_fetch_xor:
1090       LibCallName = "__atomic_fetch_xor";
1091       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1092                         MemTy, E->getExprLoc(), sizeChars);
1093       break;
1094     case AtomicExpr::AO__opencl_atomic_fetch_min:
1095       LibCallName = E->getValueType()->isSignedIntegerType()
1096                         ? "__atomic_fetch_min"
1097                         : "__atomic_fetch_umin";
1098       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1099                         LoweredMemTy, E->getExprLoc(), sizeChars);
1100       break;
1101     case AtomicExpr::AO__opencl_atomic_fetch_max:
1102       LibCallName = E->getValueType()->isSignedIntegerType()
1103                         ? "__atomic_fetch_max"
1104                         : "__atomic_fetch_umax";
1105       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1106                         LoweredMemTy, E->getExprLoc(), sizeChars);
1107       break;
1108     // T __atomic_nand_fetch_N(T *mem, T val, int order)
1109     // T __atomic_fetch_nand_N(T *mem, T val, int order)
1110     case AtomicExpr::AO__atomic_nand_fetch:
1111       PostOp = llvm::Instruction::And; // the NOT is special cased below
1112       LLVM_FALLTHROUGH;
1113     case AtomicExpr::AO__atomic_fetch_nand:
1114       LibCallName = "__atomic_fetch_nand";
1115       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1116                         MemTy, E->getExprLoc(), sizeChars);
1117       break;
1118     }
1119 
1120     if (E->isOpenCL()) {
1121       LibCallName = std::string("__opencl") +
1122           StringRef(LibCallName).drop_front(1).str();
1123 
1124     }
1125     // Optimized functions have the size in their name.
1126     if (UseOptimizedLibcall)
1127       LibCallName += "_" + llvm::utostr(Size);
1128     // By default, assume we return a value of the atomic type.
1129     if (!HaveRetTy) {
1130       if (UseOptimizedLibcall) {
1131         // Value is returned directly.
1132         // The function returns an appropriately sized integer type.
1133         RetTy = getContext().getIntTypeForBitwidth(
1134             getContext().toBits(sizeChars), /*Signed=*/false);
1135       } else {
1136         // Value is returned through parameter before the order.
1137         RetTy = getContext().VoidTy;
1138         Args.add(RValue::get(EmitCastToVoidPtr(Dest.getPointer())),
1139                  getContext().VoidPtrTy);
1140       }
1141     }
1142     // order is always the last parameter
1143     Args.add(RValue::get(Order),
1144              getContext().IntTy);
1145     if (E->isOpenCL())
1146       Args.add(RValue::get(Scope), getContext().IntTy);
1147 
1148     // PostOp is only needed for the atomic_*_fetch operations, and
1149     // thus is only needed for and implemented in the
1150     // UseOptimizedLibcall codepath.
1151     assert(UseOptimizedLibcall || !PostOp);
1152 
1153     RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
1154     // The value is returned directly from the libcall.
1155     if (E->isCmpXChg())
1156       return Res;
1157 
1158     // The value is returned directly for optimized libcalls but the expr
1159     // provided an out-param.
1160     if (UseOptimizedLibcall && Res.getScalarVal()) {
1161       llvm::Value *ResVal = Res.getScalarVal();
1162       if (PostOp) {
1163         llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
1164         ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
1165       }
1166       if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
1167         ResVal = Builder.CreateNot(ResVal);
1168 
1169       Builder.CreateStore(
1170           ResVal,
1171           Builder.CreateBitCast(Dest, ResVal->getType()->getPointerTo()));
1172     }
1173 
1174     if (RValTy->isVoidType())
1175       return RValue::get(nullptr);
1176 
1177     return convertTempToRValue(
1178         Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
1179         RValTy, E->getExprLoc());
1180   }
1181 
1182   bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
1183                  E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
1184                  E->getOp() == AtomicExpr::AO__atomic_store ||
1185                  E->getOp() == AtomicExpr::AO__atomic_store_n;
1186   bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
1187                 E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
1188                 E->getOp() == AtomicExpr::AO__atomic_load ||
1189                 E->getOp() == AtomicExpr::AO__atomic_load_n;
1190 
1191   if (isa<llvm::ConstantInt>(Order)) {
1192     auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
1193     // We should not ever get to a case where the ordering isn't a valid C ABI
1194     // value, but it's hard to enforce that in general.
1195     if (llvm::isValidAtomicOrderingCABI(ord))
1196       switch ((llvm::AtomicOrderingCABI)ord) {
1197       case llvm::AtomicOrderingCABI::relaxed:
1198         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1199                      llvm::AtomicOrdering::Monotonic, Scope);
1200         break;
1201       case llvm::AtomicOrderingCABI::consume:
1202       case llvm::AtomicOrderingCABI::acquire:
1203         if (IsStore)
1204           break; // Avoid crashing on code with undefined behavior
1205         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1206                      llvm::AtomicOrdering::Acquire, Scope);
1207         break;
1208       case llvm::AtomicOrderingCABI::release:
1209         if (IsLoad)
1210           break; // Avoid crashing on code with undefined behavior
1211         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1212                      llvm::AtomicOrdering::Release, Scope);
1213         break;
1214       case llvm::AtomicOrderingCABI::acq_rel:
1215         if (IsLoad || IsStore)
1216           break; // Avoid crashing on code with undefined behavior
1217         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1218                      llvm::AtomicOrdering::AcquireRelease, Scope);
1219         break;
1220       case llvm::AtomicOrderingCABI::seq_cst:
1221         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1222                      llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1223         break;
1224       }
1225     if (RValTy->isVoidType())
1226       return RValue::get(nullptr);
1227 
1228     return convertTempToRValue(
1229         Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
1230                                         Dest.getAddressSpace())),
1231         RValTy, E->getExprLoc());
1232   }
1233 
1234   // Long case, when Order isn't obviously constant.
1235 
1236   // Create all the relevant BB's
1237   llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
1238                    *ReleaseBB = nullptr, *AcqRelBB = nullptr,
1239                    *SeqCstBB = nullptr;
1240   MonotonicBB = createBasicBlock("monotonic", CurFn);
1241   if (!IsStore)
1242     AcquireBB = createBasicBlock("acquire", CurFn);
1243   if (!IsLoad)
1244     ReleaseBB = createBasicBlock("release", CurFn);
1245   if (!IsLoad && !IsStore)
1246     AcqRelBB = createBasicBlock("acqrel", CurFn);
1247   SeqCstBB = createBasicBlock("seqcst", CurFn);
1248   llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
1249 
1250   // Create the switch for the split
1251   // MonotonicBB is arbitrarily chosen as the default case; in practice, this
1252   // doesn't matter unless someone is crazy enough to use something that
1253   // doesn't fold to a constant for the ordering.
1254   Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
1255   llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
1256 
1257   // Emit all the different atomics
1258   Builder.SetInsertPoint(MonotonicBB);
1259   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1260                llvm::AtomicOrdering::Monotonic, Scope);
1261   Builder.CreateBr(ContBB);
1262   if (!IsStore) {
1263     Builder.SetInsertPoint(AcquireBB);
1264     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1265                  llvm::AtomicOrdering::Acquire, Scope);
1266     Builder.CreateBr(ContBB);
1267     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
1268                 AcquireBB);
1269     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
1270                 AcquireBB);
1271   }
1272   if (!IsLoad) {
1273     Builder.SetInsertPoint(ReleaseBB);
1274     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1275                  llvm::AtomicOrdering::Release, Scope);
1276     Builder.CreateBr(ContBB);
1277     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
1278                 ReleaseBB);
1279   }
1280   if (!IsLoad && !IsStore) {
1281     Builder.SetInsertPoint(AcqRelBB);
1282     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1283                  llvm::AtomicOrdering::AcquireRelease, Scope);
1284     Builder.CreateBr(ContBB);
1285     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
1286                 AcqRelBB);
1287   }
1288   Builder.SetInsertPoint(SeqCstBB);
1289   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1290                llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1291   Builder.CreateBr(ContBB);
1292   SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
1293               SeqCstBB);
1294 
1295   // Cleanup and return
1296   Builder.SetInsertPoint(ContBB);
1297   if (RValTy->isVoidType())
1298     return RValue::get(nullptr);
1299 
1300   assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
1301   return convertTempToRValue(
1302       Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
1303                                       Dest.getAddressSpace())),
1304       RValTy, E->getExprLoc());
1305 }
1306 
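/// Cast the given address to a pointer to an integer type wide enough to
/// cover the atomic's storage (e.g. a 4-byte atomic is accessed through an
/// i32*), preserving the original address space.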
1307 Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
1308   unsigned addrspace =
1309     cast<llvm::PointerType>(addr.getPointer()->getType())->getAddressSpace();
1310   llvm::IntegerType *ty =
1311     llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
1312   return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
1313 }
1314 
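/// Like emitCastToAtomicIntPointer, but if the source value is not exactly
/// the atomic width, it is first copied into a temporary of the full atomic
/// size so that later full-width integer accesses stay within the allocation.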
1315 Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
1316   llvm::Type *Ty = Addr.getElementType();
1317   uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
1318   if (SourceSizeInBits != AtomicSizeInBits) {
1319     Address Tmp = CreateTempAlloca();
1320     CGF.Builder.CreateMemCpy(Tmp, Addr,
1321                              std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
1322     Addr = Tmp;
1323   }
1324 
1325   return emitCastToAtomicIntPointer(Addr);
1326 }
1327 
1328 RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
1329                                              AggValueSlot resultSlot,
1330                                              SourceLocation loc,
1331                                              bool asValue) const {
1332   if (LVal.isSimple()) {
1333     if (EvaluationKind == TEK_Aggregate)
1334       return resultSlot.asRValue();
1335 
1336     // Drill into the padding structure if we have one.
1337     if (hasPadding())
1338       addr = CGF.Builder.CreateStructGEP(addr, 0, CharUnits());
1339 
1340     // Otherwise, just convert the temporary to an r-value using the
1341     // normal conversion routine.
1342     return CGF.convertTempToRValue(addr, getValueType(), loc);
1343   }
1344   if (!asValue)
1345     // Get RValue from temp memory as atomic for non-simple lvalues
1346     return RValue::get(CGF.Builder.CreateLoad(addr));
1347   if (LVal.isBitField())
1348     return CGF.EmitLoadOfBitfieldLValue(
1349         LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
1350                              LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1351   if (LVal.isVectorElt())
1352     return CGF.EmitLoadOfLValue(
1353         LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
1354                               LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1355   assert(LVal.isExtVectorElt());
1356   return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
1357       addr, LVal.getExtVectorElts(), LVal.getType(),
1358       LVal.getBaseInfo(), TBAAAccessInfo()));
1359 }
1360 
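/// Turn a raw atomic integer (e.g. the result of an atomic load or cmpxchg)
/// back into an r-value of the expected type, casting directly when possible
/// and bouncing through a temporary otherwise.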
1361 RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
1362                                              AggValueSlot ResultSlot,
1363                                              SourceLocation Loc,
1364                                              bool AsValue) const {
1365   // Try to avoid going through a temporary in some easy cases.
1366   assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
1367   if (getEvaluationKind() == TEK_Scalar &&
1368       (((!LVal.isBitField() ||
1369          LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
1370         !hasPadding()) ||
1371        !AsValue)) {
1372     auto *ValTy = AsValue
1373                       ? CGF.ConvertTypeForMem(ValueTy)
1374                       : getAtomicAddress().getType()->getPointerElementType();
1375     if (ValTy->isIntegerTy()) {
1376       assert(IntVal->getType() == ValTy && "Different integer types.");
1377       return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
1378     } else if (ValTy->isPointerTy())
1379       return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
1380     else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
1381       return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
1382   }
1383 
1384   // Create a temporary.  This needs to be big enough to hold the
1385   // atomic integer.
1386   Address Temp = Address::invalid();
1387   bool TempIsVolatile = false;
1388   if (AsValue && getEvaluationKind() == TEK_Aggregate) {
1389     assert(!ResultSlot.isIgnored());
1390     Temp = ResultSlot.getAddress();
1391     TempIsVolatile = ResultSlot.isVolatile();
1392   } else {
1393     Temp = CreateTempAlloca();
1394   }
1395 
1396   // Slam the integer into the temporary.
1397   Address CastTemp = emitCastToAtomicIntPointer(Temp);
1398   CGF.Builder.CreateStore(IntVal, CastTemp)
1399       ->setVolatile(TempIsVolatile);
1400 
1401   return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
1402 }
1403 
1404 void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
1405                                        llvm::AtomicOrdering AO, bool) {
1406   // void __atomic_load(size_t size, void *mem, void *ret, int order);
1407   CallArgList Args;
1408   Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1409   Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
1410            CGF.getContext().VoidPtrTy);
1411   Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddForLoaded)),
1412            CGF.getContext().VoidPtrTy);
1413   Args.add(
1414       RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
1415       CGF.getContext().IntTy);
1416   emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
1417 }
1418 
1419 llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
1420                                           bool IsVolatile) {
1421   // Okay, we're doing this natively.
1422   Address Addr = getAtomicAddressAsAtomicIntPointer();
1423   llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
1424   Load->setAtomic(AO);
1425 
1426   // Other decoration.
1427   if (IsVolatile)
1428     Load->setVolatile(true);
1429   CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
1430   return Load;
1431 }
1432 
1433 /// An LValue is a candidate for having its loads and stores be made atomic if
1434 /// we are operating under /volatile:ms *and* the LValue itself is volatile and
1435 /// such an operation can be performed without a libcall.
1436 bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
1437   if (!CGM.getCodeGenOpts().MSVolatile) return false;
1438   AtomicInfo AI(*this, LV);
1439   bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
1440   // An atomic is inline if we don't need to use a libcall.
1441   bool AtomicIsInline = !AI.shouldUseLibcall();
1442   // MSVC doesn't seem to do this for types wider than a pointer.
1443   if (getContext().getTypeSize(LV.getType()) >
1444       getContext().getTypeSize(getContext().getIntPtrType()))
1445     return false;
1446   return IsVolatile && AtomicIsInline;
1447 }
1448 
1449 RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
1450                                        AggValueSlot Slot) {
1451   llvm::AtomicOrdering AO;
1452   bool IsVolatile = LV.isVolatileQualified();
1453   if (LV.getType()->isAtomicType()) {
1454     AO = llvm::AtomicOrdering::SequentiallyConsistent;
1455   } else {
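    // Not an _Atomic type, so this is the /volatile:ms path (see
    // LValueIsSuitableForInlineAtomic above): give the load acquire semantics
    // and keep the access volatile.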
1456     AO = llvm::AtomicOrdering::Acquire;
1457     IsVolatile = true;
1458   }
1459   return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
1460 }
1461 
1462 RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
1463                                   bool AsValue, llvm::AtomicOrdering AO,
1464                                   bool IsVolatile) {
1465   // Check whether we should use a library call.
1466   if (shouldUseLibcall()) {
1467     Address TempAddr = Address::invalid();
1468     if (LVal.isSimple() && !ResultSlot.isIgnored()) {
1469       assert(getEvaluationKind() == TEK_Aggregate);
1470       TempAddr = ResultSlot.getAddress();
1471     } else
1472       TempAddr = CreateTempAlloca();
1473 
1474     EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);
1475 
1476     // Okay, turn that back into the original value or whole atomic (for
1477     // non-simple lvalues) type.
1478     return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
1479   }
1480 
1481   // Okay, we're doing this natively.
1482   auto *Load = EmitAtomicLoadOp(AO, IsVolatile);
1483 
1484   // If we're ignoring an aggregate return, don't do anything.
1485   if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
1486     return RValue::getAggregate(Address::invalid(), false);
1487 
1488   // Okay, turn that back into the original value or atomic (for non-simple
1489   // lvalues) type.
1490   return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
1491 }
1492 
1493 /// Emit a load from an l-value of atomic type.  Note that the r-value
1494 /// we produce is an r-value of the atomic *value* type.
1495 RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
1496                                        llvm::AtomicOrdering AO, bool IsVolatile,
1497                                        AggValueSlot resultSlot) {
1498   AtomicInfo Atomics(*this, src);
1499   return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
1500                                 IsVolatile);
1501 }
1502 
1503 /// Copy an r-value into memory as part of storing to an atomic type.
1504 /// This needs to create a bit-pattern suitable for atomic operations.
1505 void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
1506   assert(LVal.isSimple());
1507   // If we have an r-value, the rvalue should be of the atomic type,
1508   // which means that the caller is responsible for having zeroed
1509   // any padding.  Just do an aggregate copy of that type.
1510   if (rvalue.isAggregate()) {
1511     LValue Dest = CGF.MakeAddrLValue(getAtomicAddress(), getAtomicType());
1512     LValue Src = CGF.MakeAddrLValue(rvalue.getAggregateAddress(),
1513                                     getAtomicType());
1514     bool IsVolatile = rvalue.isVolatileQualified() ||
1515                       LVal.isVolatileQualified();
1516     CGF.EmitAggregateCopy(Dest, Src, getAtomicType(),
1517                           AggValueSlot::DoesNotOverlap, IsVolatile);
1518     return;
1519   }
1520 
1521   // Okay, otherwise we're copying stuff.
1522 
1523   // Zero out the buffer if necessary.
1524   emitMemSetZeroIfNecessary();
1525 
1526   // Drill past the padding if present.
1527   LValue TempLVal = projectValue();
1528 
1529   // Okay, store the rvalue in.
1530   if (rvalue.isScalar()) {
1531     CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
1532   } else {
1533     CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
1534   }
1535 }
1536 
1537 
1538 /// Materialize an r-value into memory for the purposes of storing it
1539 /// to an atomic type.
1540 Address AtomicInfo::materializeRValue(RValue rvalue) const {
1541   // Aggregate r-values are already in memory, and EmitAtomicStore
1542   // requires them to be values of the atomic type.
1543   if (rvalue.isAggregate())
1544     return rvalue.getAggregateAddress();
1545 
1546   // Otherwise, make a temporary and materialize into it.
1547   LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
1548   AtomicInfo Atomics(CGF, TempLV);
1549   Atomics.emitCopyIntoMemory(rvalue);
1550   return TempLV.getAddress();
1551 }
1552 
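/// Pack an r-value into the integer representation used by the native atomic
/// operations, avoiding a round trip through memory when the scalar can be
/// converted directly.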
1553 llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
1554   // If we've got a scalar value of the right size, try to avoid going
1555   // through memory.
1556   if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
1557     llvm::Value *Value = RVal.getScalarVal();
1558     if (isa<llvm::IntegerType>(Value->getType()))
1559       return CGF.EmitToMemory(Value, ValueTy);
1560     else {
1561       llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
1562           CGF.getLLVMContext(),
1563           LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
1564       if (isa<llvm::PointerType>(Value->getType()))
1565         return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
1566       else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
1567         return CGF.Builder.CreateBitCast(Value, InputIntTy);
1568     }
1569   }
1570   // Otherwise, we need to go through memory.
1571   // Put the r-value in memory.
1572   Address Addr = materializeRValue(RVal);
1573 
1574   // Cast the temporary to the atomic int type and pull a value out.
1575   Addr = emitCastToAtomicIntPointer(Addr);
1576   return CGF.Builder.CreateLoad(Addr);
1577 }
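/// Emit a native cmpxchg on the atomic's integer representation and return
/// the (previous value, success flag) pair extracted from its result.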
1578 
1579 std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
1580     llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
1581     llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
1582   // Do the atomic compare-and-exchange.
1583   Address Addr = getAtomicAddressAsAtomicIntPointer();
1584   auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
1585                                                ExpectedVal, DesiredVal,
1586                                                Success, Failure);
1587   // Other decoration.
1588   Inst->setVolatile(LVal.isVolatileQualified());
1589   Inst->setWeak(IsWeak);
1590 
1591   // Okay, turn that back into the original value type.
1592   auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
1593   auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
1594   return std::make_pair(PreviousVal, SuccessFailureVal);
1595 }
1596 
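/// Call the __atomic_compare_exchange runtime function.  The returned value
/// is the boolean success flag; on failure the runtime is expected to write
/// the value it actually observed back into *ExpectedAddr, which is what the
/// compare-and-swap loops below rely on.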
1597 llvm::Value *
1598 AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
1599                                              llvm::Value *DesiredAddr,
1600                                              llvm::AtomicOrdering Success,
1601                                              llvm::AtomicOrdering Failure) {
1602   // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
1603   // void *desired, int success, int failure);
1604   CallArgList Args;
1605   Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1606   Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
1607            CGF.getContext().VoidPtrTy);
1608   Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
1609            CGF.getContext().VoidPtrTy);
1610   Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
1611            CGF.getContext().VoidPtrTy);
1612   Args.add(RValue::get(
1613                llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
1614            CGF.getContext().IntTy);
1615   Args.add(RValue::get(
1616                llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
1617            CGF.getContext().IntTy);
1618   auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
1619                                               CGF.getContext().BoolTy, Args);
1620 
1621   return SuccessFailureRVal.getScalarVal();
1622 }
1623 
1624 std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
1625     RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
1626     llvm::AtomicOrdering Failure, bool IsWeak) {
1627   if (isStrongerThan(Failure, Success))
1628     // Don't assert on undefined behavior: "the failure argument shall be no
1629     // stronger than the success argument".
1630     Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
1631 
1632   // Check whether we should use a library call.
1633   if (shouldUseLibcall()) {
1634     // Produce a source address.
1635     Address ExpectedAddr = materializeRValue(Expected);
1636     Address DesiredAddr = materializeRValue(Desired);
1637     auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1638                                                  DesiredAddr.getPointer(),
1639                                                  Success, Failure);
1640     return std::make_pair(
1641         convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
1642                                   SourceLocation(), /*AsValue=*/false),
1643         Res);
1644   }
1645 
1646   // If we've got a scalar value of the right size, try to avoid going
1647   // through memory.
1648   auto *ExpectedVal = convertRValueToInt(Expected);
1649   auto *DesiredVal = convertRValueToInt(Desired);
1650   auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
1651                                          Failure, IsWeak);
1652   return std::make_pair(
1653       ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
1654                                 SourceLocation(), /*AsValue=*/false),
1655       Res.second);
1656 }
1657 
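/// Compute the updated value from the old one via UpdateOp and store it into
/// DesiredAddr, narrowing the store to the addressed bitfield or vector
/// element when the atomic l-value is not simple.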
1658 static void
1659 EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
1660                       const llvm::function_ref<RValue(RValue)> &UpdateOp,
1661                       Address DesiredAddr) {
1662   RValue UpRVal;
1663   LValue AtomicLVal = Atomics.getAtomicLValue();
1664   LValue DesiredLVal;
1665   if (AtomicLVal.isSimple()) {
1666     UpRVal = OldRVal;
1667     DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
1668   } else {
1669     // Build new lvalue for temp address
1670     Address Ptr = Atomics.materializeRValue(OldRVal);
1671     LValue UpdateLVal;
1672     if (AtomicLVal.isBitField()) {
1673       UpdateLVal =
1674           LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
1675                                AtomicLVal.getType(),
1676                                AtomicLVal.getBaseInfo(),
1677                                AtomicLVal.getTBAAInfo());
1678       DesiredLVal =
1679           LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1680                                AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1681                                AtomicLVal.getTBAAInfo());
1682     } else if (AtomicLVal.isVectorElt()) {
1683       UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
1684                                          AtomicLVal.getType(),
1685                                          AtomicLVal.getBaseInfo(),
1686                                          AtomicLVal.getTBAAInfo());
1687       DesiredLVal = LValue::MakeVectorElt(
1688           DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
1689           AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1690     } else {
1691       assert(AtomicLVal.isExtVectorElt());
1692       UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
1693                                             AtomicLVal.getType(),
1694                                             AtomicLVal.getBaseInfo(),
1695                                             AtomicLVal.getTBAAInfo());
1696       DesiredLVal = LValue::MakeExtVectorElt(
1697           DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1698           AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1699     }
1700     UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
1701   }
1702   // Store new value in the corresponding memory area
1703   RValue NewRVal = UpdateOp(UpRVal);
1704   if (NewRVal.isScalar()) {
1705     CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
1706   } else {
1707     assert(NewRVal.isComplex());
1708     CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
1709                            /*isInit=*/false);
1710   }
1711 }
1712 
1713 void AtomicInfo::EmitAtomicUpdateLibcall(
1714     llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1715     bool IsVolatile) {
1716   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1717 
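  // Libcall-based compare-and-swap loop: read the current value into
  // ExpectedAddr, build the updated value in DesiredAddr, and retry
  // __atomic_compare_exchange until it succeeds; on failure the runtime
  // refreshes ExpectedAddr with the value it observed.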
1718   Address ExpectedAddr = CreateTempAlloca();
1719 
1720   EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
1721   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1722   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1723   CGF.EmitBlock(ContBB);
1724   Address DesiredAddr = CreateTempAlloca();
1725   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1726       requiresMemSetZero(getAtomicAddress().getElementType())) {
1727     auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1728     CGF.Builder.CreateStore(OldVal, DesiredAddr);
1729   }
1730   auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
1731                                            AggValueSlot::ignored(),
1732                                            SourceLocation(), /*AsValue=*/false);
1733   EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
1734   auto *Res =
1735       EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1736                                        DesiredAddr.getPointer(),
1737                                        AO, Failure);
1738   CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1739   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1740 }
1741 
1742 void AtomicInfo::EmitAtomicUpdateOp(
1743     llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1744     bool IsVolatile) {
1745   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1746 
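  // Native compare-and-swap loop: the PHI merges the initially loaded value
  // with the value observed by a failed cmpxchg, the desired value is
  // recomputed from it on every iteration, and the loop exits once the
  // cmpxchg succeeds.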
1747   // Do the atomic load.
1748   auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
1749   // For non-simple lvalues, perform a compare-and-swap procedure.
1750   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1751   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1752   auto *CurBB = CGF.Builder.GetInsertBlock();
1753   CGF.EmitBlock(ContBB);
1754   llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1755                                              /*NumReservedValues=*/2);
1756   PHI->addIncoming(OldVal, CurBB);
1757   Address NewAtomicAddr = CreateTempAlloca();
1758   Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1759   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1760       requiresMemSetZero(getAtomicAddress().getElementType())) {
1761     CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1762   }
1763   auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
1764                                            SourceLocation(), /*AsValue=*/false);
1765   EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
1766   auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1767   // Try to write new value using cmpxchg operation
1768   auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1769   PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1770   CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1771   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1772 }
1773 
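/// Store a fixed update value (rather than one computed by a callback) into
/// DesiredAddr, honoring bitfield and vector-element l-values.  The
/// RValue-based update overloads below otherwise mirror the UpdateOp-based
/// ones above.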
1774 static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
1775                                   RValue UpdateRVal, Address DesiredAddr) {
1776   LValue AtomicLVal = Atomics.getAtomicLValue();
1777   LValue DesiredLVal;
1778   // Build new lvalue for temp address
1779   if (AtomicLVal.isBitField()) {
1780     DesiredLVal =
1781         LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1782                              AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1783                              AtomicLVal.getTBAAInfo());
1784   } else if (AtomicLVal.isVectorElt()) {
1785     DesiredLVal =
1786         LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
1787                               AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1788                               AtomicLVal.getTBAAInfo());
1789   } else {
1790     assert(AtomicLVal.isExtVectorElt());
1791     DesiredLVal = LValue::MakeExtVectorElt(
1792         DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1793         AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1794   }
1795   // Store new value in the corresponding memory area
1796   assert(UpdateRVal.isScalar());
1797   CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
1798 }
1799 
1800 void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
1801                                          RValue UpdateRVal, bool IsVolatile) {
1802   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1803 
1804   Address ExpectedAddr = CreateTempAlloca();
1805 
1806   EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
1807   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1808   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1809   CGF.EmitBlock(ContBB);
1810   Address DesiredAddr = CreateTempAlloca();
1811   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1812       requiresMemSetZero(getAtomicAddress().getElementType())) {
1813     auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1814     CGF.Builder.CreateStore(OldVal, DesiredAddr);
1815   }
1816   EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
1817   auto *Res =
1818       EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1819                                        DesiredAddr.getPointer(),
1820                                        AO, Failure);
1821   CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1822   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1823 }
1824 
1825 void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
1826                                     bool IsVolatile) {
1827   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1828 
1829   // Do the atomic load.
1830   auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
1831   // For non-simple lvalues, perform a compare-and-swap procedure.
1832   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1833   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1834   auto *CurBB = CGF.Builder.GetInsertBlock();
1835   CGF.EmitBlock(ContBB);
1836   llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1837                                              /*NumReservedValues=*/2);
1838   PHI->addIncoming(OldVal, CurBB);
1839   Address NewAtomicAddr = CreateTempAlloca();
1840   Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1841   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1842       requiresMemSetZero(getAtomicAddress().getElementType())) {
1843     CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1844   }
1845   EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
1846   auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1847   // Try to write new value using cmpxchg operation
1848   auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1849   PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1850   CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1851   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1852 }
1853 
1854 void AtomicInfo::EmitAtomicUpdate(
1855     llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1856     bool IsVolatile) {
1857   if (shouldUseLibcall()) {
1858     EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
1859   } else {
1860     EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
1861   }
1862 }
1863 
1864 void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
1865                                   bool IsVolatile) {
1866   if (shouldUseLibcall()) {
1867     EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
1868   } else {
1869     EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
1870   }
1871 }
1872 
1873 void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
1874                                       bool isInit) {
1875   bool IsVolatile = lvalue.isVolatileQualified();
1876   llvm::AtomicOrdering AO;
1877   if (lvalue.getType()->isAtomicType()) {
1878     AO = llvm::AtomicOrdering::SequentiallyConsistent;
1879   } else {
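    // Not an _Atomic type, so this is the MS-volatile (/volatile:ms) case:
    // give the store release semantics and keep the access volatile.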
1880     AO = llvm::AtomicOrdering::Release;
1881     IsVolatile = true;
1882   }
1883   return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
1884 }
1885 
1886 /// Emit a store to an l-value of atomic type.
1887 ///
1888 /// Note that the r-value is expected to be an r-value *of the atomic
1889 /// type*; this means that for aggregate r-values, it should include
1890 /// storage for any padding that was necessary.
1891 void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
1892                                       llvm::AtomicOrdering AO, bool IsVolatile,
1893                                       bool isInit) {
1894   // If this is an aggregate r-value, it should agree in type except
1895   // maybe for address-space qualification.
1896   assert(!rvalue.isAggregate() ||
1897          rvalue.getAggregateAddress().getElementType()
1898            == dest.getAddress().getElementType());
1899 
1900   AtomicInfo atomics(*this, dest);
1901   LValue LVal = atomics.getAtomicLValue();
1902 
1903   // If this is an initialization, just put the value there normally.
1904   if (LVal.isSimple()) {
1905     if (isInit) {
1906       atomics.emitCopyIntoMemory(rvalue);
1907       return;
1908     }
1909 
1910     // Check whether we should use a library call.
1911     if (atomics.shouldUseLibcall()) {
1912       // Produce a source address.
1913       Address srcAddr = atomics.materializeRValue(rvalue);
1914 
1915       // void __atomic_store(size_t size, void *mem, void *val, int order)
1916       CallArgList args;
1917       args.add(RValue::get(atomics.getAtomicSizeValue()),
1918                getContext().getSizeType());
1919       args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())),
1920                getContext().VoidPtrTy);
1921       args.add(RValue::get(EmitCastToVoidPtr(srcAddr.getPointer())),
1922                getContext().VoidPtrTy);
1923       args.add(
1924           RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
1925           getContext().IntTy);
1926       emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
1927       return;
1928     }
1929 
1930     // Okay, we're doing this natively.
1931     llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
1932 
1933     // Do the atomic store.
1934     Address addr =
1935         atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
1936     intValue = Builder.CreateIntCast(
1937         intValue, addr.getElementType(), /*isSigned=*/false);
1938     llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
1939 
1940     // Initializations don't need to be atomic.
1941     if (!isInit)
1942       store->setAtomic(AO);
1943 
1944     // Other decoration.
1945     if (IsVolatile)
1946       store->setVolatile(true);
1947     CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
1948     return;
1949   }
1950 
1951   // Non-simple lvalue: emit the store as an atomic update operation.
1952   atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
1953 }
1954 
1955 /// Emit a compare-and-exchange op for atomic type.
1956 ///
1957 std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
1958     LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
1959     llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
1960     AggValueSlot Slot) {
1961   // If this is an aggregate r-value, it should agree in type except
1962   // maybe for address-space qualification.
1963   assert(!Expected.isAggregate() ||
1964          Expected.getAggregateAddress().getElementType() ==
1965              Obj.getAddress().getElementType());
1966   assert(!Desired.isAggregate() ||
1967          Desired.getAggregateAddress().getElementType() ==
1968              Obj.getAddress().getElementType());
1969   AtomicInfo Atomics(*this, Obj);
1970 
1971   return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
1972                                            IsWeak);
1973 }
1974 
1975 void CodeGenFunction::EmitAtomicUpdate(
1976     LValue LVal, llvm::AtomicOrdering AO,
1977     const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
1978   AtomicInfo Atomics(*this, LVal);
1979   Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
1980 }
1981 
1982 void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
1983   AtomicInfo atomics(*this, dest);
1984 
1985   switch (atomics.getEvaluationKind()) {
1986   case TEK_Scalar: {
1987     llvm::Value *value = EmitScalarExpr(init);
1988     atomics.emitCopyIntoMemory(RValue::get(value));
1989     return;
1990   }
1991 
1992   case TEK_Complex: {
1993     ComplexPairTy value = EmitComplexExpr(init);
1994     atomics.emitCopyIntoMemory(RValue::getComplex(value));
1995     return;
1996   }
1997 
1998   case TEK_Aggregate: {
1999     // Fix up the destination if the initializer isn't an expression
2000     // of atomic type.
2001     bool Zeroed = false;
2002     if (!init->getType()->isAtomicType()) {
2003       Zeroed = atomics.emitMemSetZeroIfNecessary();
2004       dest = atomics.projectValue();
2005     }
2006 
2007     // Evaluate the expression directly into the destination.
2008     AggValueSlot slot = AggValueSlot::forLValue(dest,
2009                                         AggValueSlot::IsNotDestructed,
2010                                         AggValueSlot::DoesNotNeedGCBarriers,
2011                                         AggValueSlot::IsNotAliased,
2012                                         AggValueSlot::DoesNotOverlap,
2013                                         Zeroed ? AggValueSlot::IsZeroed :
2014                                                  AggValueSlot::IsNotZeroed);
2015 
2016     EmitAggExpr(init, slot);
2017     return;
2018   }
2019   }
2020   llvm_unreachable("bad evaluation kind");
2021 }
2022