//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;

namespace {
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    CharUnits LValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
    LValue LVal;
    CGBitFieldInfo BFI;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
        : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
          EvaluationKind(TEK_Scalar), UseLibcall(true) {
      assert(!lvalue.isGlobalReg());
      ASTContext &C = CGF.getContext();
      if (lvalue.isSimple()) {
        AtomicTy = lvalue.getType();
        if (auto *ATy = AtomicTy->getAs<AtomicType>())
          ValueTy = ATy->getValueType();
        else
          ValueTy = AtomicTy;
        EvaluationKind = CGF.getEvaluationKind(ValueTy);

        uint64_t ValueAlignInBits;
        uint64_t AtomicAlignInBits;
        TypeInfo ValueTI = C.getTypeInfo(ValueTy);
        ValueSizeInBits = ValueTI.Width;
        ValueAlignInBits = ValueTI.Align;

        TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
        AtomicSizeInBits = AtomicTI.Width;
        AtomicAlignInBits = AtomicTI.Align;

        assert(ValueSizeInBits <= AtomicSizeInBits);
        assert(ValueAlignInBits <= AtomicAlignInBits);

        AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
        ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
        if (lvalue.getAlignment().isZero())
          lvalue.setAlignment(AtomicAlign);

        LVal = lvalue;
      } else if (lvalue.isBitField()) {
        ValueTy = lvalue.getType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        auto &OrigBFI = lvalue.getBitFieldInfo();
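        // Round the bit-field region out to an aligned storage unit. For
        // example (illustrative), a 3-bit field at bit offset 6 in a
        // 4-byte-aligned record is widened to its containing 32-bit unit.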
        auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
        AtomicSizeInBits = C.toBits(
            C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
                .alignTo(lvalue.getAlignment()));
        auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldPointer());
        auto OffsetInChars =
            (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
            lvalue.getAlignment();
        VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
            VoidPtrAddr, OffsetInChars.getQuantity());
        auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            VoidPtrAddr,
            CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
            "atomic_bitfield_base");
        BFI = OrigBFI;
        BFI.Offset = Offset;
        BFI.StorageSize = AtomicSizeInBits;
        BFI.StorageOffset += OffsetInChars;
        LVal = LValue::MakeBitfield(Address(Addr, lvalue.getAlignment()),
                                    BFI, lvalue.getType(),
                                    lvalue.getAlignmentSource());
        LVal.setTBAAInfo(lvalue.getTBAAInfo());
        AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
        if (AtomicTy.isNull()) {
          llvm::APInt Size(
              /*numBits=*/32,
              C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
          AtomicTy = C.getConstantArrayType(C.CharTy, Size, ArrayType::Normal,
                                            /*IndexTypeQuals=*/0);
        }
        AtomicAlign = ValueAlign = lvalue.getAlignment();
      } else if (lvalue.isVectorElt()) {
        ValueTy = lvalue.getType()->getAs<VectorType>()->getElementType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        AtomicTy = lvalue.getType();
        AtomicSizeInBits = C.getTypeSize(AtomicTy);
        AtomicAlign = ValueAlign = lvalue.getAlignment();
        LVal = lvalue;
      } else {
        assert(lvalue.isExtVectorElt());
        ValueTy = lvalue.getType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
            lvalue.getType(), lvalue.getExtVectorAddress()
                                  .getElementType()->getVectorNumElements());
        AtomicSizeInBits = C.getTypeSize(AtomicTy);
        AtomicAlign = ValueAlign = lvalue.getAlignment();
        LVal = lvalue;
      }
      UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
          AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
    }

    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    CharUnits getValueAlignment() const { return ValueAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }
    const LValue &getAtomicLValue() const { return LVal; }
    llvm::Value *getAtomicPointer() const {
      if (LVal.isSimple())
        return LVal.getPointer();
      else if (LVal.isBitField())
        return LVal.getBitFieldPointer();
      else if (LVal.isVectorElt())
        return LVal.getVectorPointer();
      assert(LVal.isExtVectorElt());
      return LVal.getExtVectorPointer();
    }
    Address getAtomicAddress() const {
      return Address(getAtomicPointer(), getAtomicAlignment());
    }

    Address getAtomicAddressAsAtomicIntPointer() const {
      return emitCastToAtomicIntPointer(getAtomicAddress());
    }

    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
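    /// For example (illustrative), a target may round an odd-sized atomic
    /// type up to the next power-of-two width, leaving trailing padding
    /// bytes, or merely raise its alignment without adding any padding.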
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }

    bool emitMemSetZeroIfNecessary() const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }

    /// Cast the given pointer to an integer pointer suitable for atomic
    /// operations.
    Address emitCastToAtomicIntPointer(Address Addr) const;

    /// If Addr is compatible with the iN that will be used for an atomic
    /// operation, bitcast it. Otherwise, create a temporary that is suitable
    /// and copy the value across.
    Address convertToAtomicIntPointer(Address Addr) const;

    /// Turn an atomic-layout object into an r-value.
    RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
                                     SourceLocation loc, bool AsValue) const;

    /// \brief Converts an r-value to an integer value.
    llvm::Value *convertRValueToInt(RValue RVal) const;

    RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
                                     AggValueSlot ResultSlot,
                                     SourceLocation Loc, bool AsValue) const;

    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue() const {
      assert(LVal.isSimple());
      Address addr = getAtomicAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0, CharUnits());

      return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
                              LVal.getAlignmentSource(), LVal.getTBAAInfo());
    }

    /// \brief Emits atomic load.
    /// \returns Loaded value.
    RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                          bool AsValue, llvm::AtomicOrdering AO,
                          bool IsVolatile);

    /// \brief Emits atomic compare-and-exchange sequence.
    /// \param Expected Expected value.
    /// \param Desired Desired value.
    /// \param Success Atomic ordering for the success case.
    /// \param Failure Atomic ordering for the failure case.
    /// \param IsWeak true if the atomic operation is weak, false otherwise.
    /// \returns Pair of values: the previous value from storage (value type)
    /// and a boolean flag (i1 type) that is true on success and false
    /// otherwise.
    std::pair<RValue, llvm::Value *>
    EmitAtomicCompareExchange(RValue Expected, RValue Desired,
                              llvm::AtomicOrdering Success =
                                  llvm::AtomicOrdering::SequentiallyConsistent,
                              llvm::AtomicOrdering Failure =
                                  llvm::AtomicOrdering::SequentiallyConsistent,
                              bool IsWeak = false);

    /// \brief Emits atomic update.
    /// \param AO Atomic ordering.
    /// \param UpdateOp Update operation for the current lvalue.
    void EmitAtomicUpdate(llvm::AtomicOrdering AO,
                          const llvm::function_ref<RValue(RValue)> &UpdateOp,
                          bool IsVolatile);
    /// \brief Emits atomic update.
    /// \param AO Atomic ordering.
    void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                          bool IsVolatile);

    /// Materialize an atomic r-value in atomic-layout memory.
    Address materializeRValue(RValue rvalue) const;

    /// \brief Creates a temp alloca for intermediate operations on an atomic
    /// value.
    Address CreateTempAlloca() const;
  private:
    bool requiresMemSetZero(llvm::Type *type) const;

    /// \brief Emits atomic load as a libcall.
    void EmitAtomicLoadLibcall(llvm::Value *AddrForLoaded,
                               llvm::AtomicOrdering AO, bool IsVolatile);
    /// \brief Emits atomic load as an LLVM instruction.
    llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
    /// \brief Emits atomic compare-and-exchange op as a libcall.
    llvm::Value *EmitAtomicCompareExchangeLibcall(
        llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
        llvm::AtomicOrdering Success =
            llvm::AtomicOrdering::SequentiallyConsistent,
        llvm::AtomicOrdering Failure =
            llvm::AtomicOrdering::SequentiallyConsistent);
    /// \brief Emits atomic compare-and-exchange op as LLVM instructions.
    std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
        llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
        llvm::AtomicOrdering Success =
            llvm::AtomicOrdering::SequentiallyConsistent,
        llvm::AtomicOrdering Failure =
            llvm::AtomicOrdering::SequentiallyConsistent,
        bool IsWeak = false);
    /// \brief Emit atomic update as libcalls.
    void
    EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                            const llvm::function_ref<RValue(RValue)> &UpdateOp,
                            bool IsVolatile);
    /// \brief Emit atomic update as LLVM instructions.
    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
                            const llvm::function_ref<RValue(RValue)> &UpdateOp,
                            bool IsVolatile);
    /// \brief Emit atomic update as libcalls.
    void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                 bool IsVolatile);
    /// \brief Emit atomic update as LLVM instructions.
    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                            bool IsVolatile);
  };
}

Address AtomicInfo::CreateTempAlloca() const {
  Address TempAlloca = CGF.CreateMemTemp(
      (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
                                                                : AtomicTy,
      getAtomicAlignment(),
      "atomic-temp");
  // Cast to pointer to value type for bit-fields.
  if (LVal.isBitField())
    return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        TempAlloca, getAtomicAddress().getType());
  return TempAlloca;
}

static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
    CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
  return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
}

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
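/// (Padding matters here because compare-and-exchange compares the full iN
/// representation, padding bits included, so they must hold a known value.)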
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern.  User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}

bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(LVal.isSimple());
  llvm::Value *addr = LVal.getPointer();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(
      addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
      CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
      LVal.getAlignment().getQuantity());
  return true;
}

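// A sketch of the IR the helper below produces (illustrative, assuming a
// 32-bit operand):
//   %pair = cmpxchg i32* %ptr, i32 %expected, i32 %desired <succ> <fail>
//   %old  = extractvalue { i32, i1 } %pair, 0
//   %ok   = extractvalue { i32, i1 } %pair, 1
//   br i1 %ok, label %cmpxchg.continue, label %cmpxchg.store_expected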
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
                              Address Dest, Address Ptr,
                              Address Val1, Address Val2,
                              uint64_t Size,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder) {
  // Load the expected and desired operands.
  llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
  llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);

  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder);
  Pair->setVolatile(E->isVolatile());
  Pair->setWeak(IsWeak);

  // Cmp holds the result of the compare-exchange operation: true on success,
  // false on failure.
  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

  // This basic block is used to hold the store instruction if the operation
  // failed.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

  // This basic block is the exit point of the operation, we should end up
  // here regardless of whether or not the operation succeeded.
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  // If the compare failed, update Expected with the old value; otherwise
  // branch straight to the exit point.
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // Update the memory at Expected with Old's value.
  CGF.Builder.CreateStore(Old, Val1);
  // Finally, branch to the exit point.
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // Update the memory at Dest with Cmp's value.
  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
}

/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically known)
/// FailureOrder.
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        bool IsWeak, Address Dest, Address Ptr,
                                        Address Val1, Address Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size,
                                        llvm::AtomicOrdering SuccessOrder) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    auto FOS = FO->getSExtValue();
    if (!llvm::isValidAtomicOrderingCABI(FOS))
      FailureOrder = llvm::AtomicOrdering::Monotonic;
    else
      switch ((llvm::AtomicOrderingCABI)FOS) {
      case llvm::AtomicOrderingCABI::relaxed:
      case llvm::AtomicOrderingCABI::release:
      case llvm::AtomicOrderingCABI::acq_rel:
        FailureOrder = llvm::AtomicOrdering::Monotonic;
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        FailureOrder = llvm::AtomicOrdering::Acquire;
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
        break;
      }
    if (isStrongerThan(FailureOrder, SuccessOrder)) {
      // Don't assert on undefined behavior: "the failure argument shall be no
      // stronger than the success argument".
      FailureOrder =
          llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
    }
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                      FailureOrder);
    return;
  }

  // Create all the relevant BBs.
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
  if (SuccessOrder != llvm::AtomicOrdering::Monotonic &&
      SuccessOrder != llvm::AtomicOrdering::Release)
    AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  if (SuccessOrder == llvm::AtomicOrdering::SequentiallyConsistent)
    SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);

  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);

  // Emit all the different atomics

  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, SuccessOrder, llvm::AtomicOrdering::Monotonic);
  CGF.Builder.CreateBr(ContBB);

  if (AcquireBB) {
    CGF.Builder.SetInsertPoint(AcquireBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, SuccessOrder, llvm::AtomicOrdering::Acquire);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
                AcquireBB);
    SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
                AcquireBB);
  }
  if (SeqCstBB) {
    CGF.Builder.SetInsertPoint(SeqCstBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                      llvm::AtomicOrdering::SequentiallyConsistent);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
                SeqCstBB);
  }

  CGF.Builder.SetInsertPoint(ContBB);
}

static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Order);
    } else {
      // Create all the relevant BBs.
      llvm::BasicBlock *StrongBB =
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB =
          CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
      llvm::BasicBlock *ContBB =
          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setVolatile(E->isVolatile());
    CGF.Builder.CreateStore(Load, Dest);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And; // the NOT is special cased below
  // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
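  // For __atomic_nand_fetch the result must be ~(old & val) (the
  // GCC-compatible NAND), so the And is redone here and inverted below.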
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  CGF.Builder.CreateStore(Result, Dest);
}

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static Address
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                  SourceLocation Loc, CharUnits SizeInChars) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
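    // E.g. (illustrative) a 4-byte operand becomes the i32 value argument
    // expected by a sized libcall such as __atomic_fetch_add_4(mem, val,
    // order), rather than being passed by address.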
    CharUnits Align = CGF.getContext().getTypeAlignInChars(ValTy);
    int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
    ValTy =
        CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
    llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
                                                SizeInBits)->getPointerTo();
    Address Ptr = Address(CGF.Builder.CreateBitCast(Val, IPtrTy), Align);
    Val = CGF.EmitLoadOfScalar(Ptr, false,
                               CGF.getContext().getPointerType(ValTy),
                               Loc);
    // Coerce the value into an appropriately sized integer type.
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
                         CGF.getContext().VoidPtrTy);
  }
}

RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars, alignChars;
  std::tie(sizeChars, alignChars) = getContext().getTypeInfoInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
  bool UseLibcall = (sizeChars != alignChars ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);
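  // For example (illustrative), a 16-byte atomic type on a target whose
  // MaxAtomicInlineWidth is 64, or a type whose size and alignment differ,
  // takes the __atomic_* libcall path instead of native instructions.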

  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;

  Address Val1 = Address::invalid();
  Address Val2 = Address::invalid();
  Address Dest = Address::invalid();
  Address Ptr(EmitScalarExpr(E->getPtr()), alignChars);

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  llvm::Value *Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled above with EmitAtomicInit!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    Dest = EmitPointerWithAlignment(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitPointerWithAlignment(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getNumSubExprs() == 6)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
      Val1 = Temp;
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  QualType RValTy = E->getType().getUnqualifiedType();

  // The inlined atomics only function on iN types, where N is a power of 2. We
  // need to make sure (via temporaries if necessary) that all incoming values
  // are compatible.
  LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
  AtomicInfo Atomics(*this, AtomicVal);

  Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
  if (Val1.isValid()) Val1 = Atomics.convertToAtomicIntPointer(Val1);
  if (Val2.isValid()) Val2 = Atomics.convertToAtomicIntPointer(Val2);
  if (Dest.isValid())
    Dest = Atomics.emitCastToAtomicIntPointer(Dest);
  else if (E->isCmpXChg())
    Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
  else if (!RValTy->isVoidType())
    Dest = Atomics.emitCastToAtomicIntPointer(Atomics.CreateTempAlloca());

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_init:
      llvm_unreachable("Already handled above with EmitAtomicInit!");

    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_nand:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
    case AtomicExpr::AO__atomic_add_fetch:
    case AtomicExpr::AO__atomic_and_fetch:
    case AtomicExpr::AO__atomic_nand_fetch:
    case AtomicExpr::AO__atomic_or_fetch:
    case AtomicExpr::AO__atomic_sub_fetch:
    case AtomicExpr::AO__atomic_xor_fetch:
      // For these, only sized library calls (the "_N" variants) exist.
      UseOptimizedLibcall = true;
      break;

    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_load_n:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_store_n:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
    case AtomicExpr::AO__atomic_compare_exchange:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }

    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter.
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // The atomic address is the first or second parameter.
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr.getPointer())),
             getContext().VoidPtrTy);

    std::string LibCallName;
    QualType LoweredMemTy =
      MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
    QualType RetTy;
    bool HaveRetTy = false;
    llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_init:
      llvm_unreachable("Already handled!");

    // There is only one libcall for compare and exchange, because there is
    // no optimization benefit possible from a libcall version of a weak
    // compare and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1.getPointer())),
               getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
                        MemTy, E->getExprLoc(), sizeChars);
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), sizeChars);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), sizeChars);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_add_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_add_fetch:
      PostOp = llvm::Instruction::Add;
    // Fall through.
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        LoweredMemTy, E->getExprLoc(), sizeChars);
      break;
    // T __atomic_and_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_and_fetch:
      PostOp = llvm::Instruction::And;
    // Fall through.
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), sizeChars);
      break;
    // T __atomic_or_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_or_fetch:
      PostOp = llvm::Instruction::Or;
    // Fall through.
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), sizeChars);
      break;
    // T __atomic_sub_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_sub_fetch:
      PostOp = llvm::Instruction::Sub;
    // Fall through.
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        LoweredMemTy, E->getExprLoc(), sizeChars);
      break;
    // T __atomic_xor_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_xor_fetch:
      PostOp = llvm::Instruction::Xor;
    // Fall through.
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), sizeChars);
      break;
    // T __atomic_nand_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_nand_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_nand_fetch:
      PostOp = llvm::Instruction::And; // the NOT is special cased below
    // Fall through.
    case AtomicExpr::AO__atomic_fetch_nand:
      LibCallName = "__atomic_fetch_nand";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), sizeChars);
      break;
    }

    // Optimized functions have the size in their name.
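    // E.g. (illustrative) "__atomic_fetch_add" becomes "__atomic_fetch_add_4"
    // for a 4-byte operand.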
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        // The function returns an appropriately sized integer type.
        RetTy = getContext().getIntTypeForBitwidth(
            getContext().toBits(sizeChars), /*Signed=*/false);
      } else {
        // Value is returned through a parameter that precedes the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest.getPointer())),
                 getContext().VoidPtrTy);
      }
    }
    // The order is always the last parameter.
    Args.add(RValue::get(Order),
             getContext().IntTy);

    // PostOp is only needed for the atomic_*_fetch operations, and
    // thus is only needed for, and implemented in, the
    // UseOptimizedLibcall codepath.
    assert(UseOptimizedLibcall || !PostOp);

    RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
    // The value is returned directly from the libcall.
    if (E->isCmpXChg())
      return Res;

    // The value is returned directly for optimized libcalls but the expr
    // provided an out-param.
    if (UseOptimizedLibcall && Res.getScalarVal()) {
      llvm::Value *ResVal = Res.getScalarVal();
      if (PostOp) {
        llvm::Value *LoadVal1 = Args[1].RV.getScalarVal();
        ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
      }
      if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
        ResVal = Builder.CreateNot(ResVal);

      Builder.CreateStore(
          ResVal,
          Builder.CreateBitCast(Dest, ResVal->getType()->getPointerTo()));
    }

    if (RValTy->isVoidType())
      return RValue::get(nullptr);

    return convertTempToRValue(
        Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
        RValTy, E->getExprLoc());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  if (isa<llvm::ConstantInt>(Order)) {
    auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    // We should not ever get to a case where the ordering isn't a valid C ABI
    // value, but it's hard to enforce that in general.
    if (llvm::isValidAtomicOrderingCABI(ord))
      switch ((llvm::AtomicOrderingCABI)ord) {
      case llvm::AtomicOrderingCABI::relaxed:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Monotonic);
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        if (IsStore)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Acquire);
        break;
      case llvm::AtomicOrderingCABI::release:
        if (IsLoad)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Release);
        break;
      case llvm::AtomicOrderingCABI::acq_rel:
        if (IsLoad || IsStore)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::AcquireRelease);
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::SequentiallyConsistent);
        break;
      }
    if (RValTy->isVoidType())
      return RValue::get(nullptr);

    return convertTempToRValue(
        Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
        RValTy, E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BBs.
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split.
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
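  // The resulting dispatch has roughly this shape (illustrative):
  //   switch i32 %order, label %monotonic [ i32 1, label %acquire
  //                                         i32 2, label %acquire
  //                                         i32 3, label %release ... ]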
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, llvm::AtomicOrdering::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, llvm::AtomicOrdering::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
                AcquireBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, llvm::AtomicOrdering::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, llvm::AtomicOrdering::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, llvm::AtomicOrdering::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
              SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (RValTy->isVoidType())
    return RValue::get(nullptr);

  assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
  return convertTempToRValue(
      Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
      RValTy, E->getExprLoc());
}

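// E.g. (illustrative) an atomic float of width 32 is accessed through an
// i32* below, so the atomic instructions operate on a plain integer.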
1140 Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
1141   unsigned addrspace =
1142     cast<llvm::PointerType>(addr.getPointer()->getType())->getAddressSpace();
1143   llvm::IntegerType *ty =
1144     llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
1145   return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
1146 }
1147 
1148 Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
1149   llvm::Type *Ty = Addr.getElementType();
1150   uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
1151   if (SourceSizeInBits != AtomicSizeInBits) {
1152     Address Tmp = CreateTempAlloca();
1153     CGF.Builder.CreateMemCpy(Tmp, Addr,
1154                              std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
1155     Addr = Tmp;
1156   }
1157 
1158   return emitCastToAtomicIntPointer(Addr);
1159 }
1160 
1161 RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
1162                                              AggValueSlot resultSlot,
1163                                              SourceLocation loc,
1164                                              bool asValue) const {
1165   if (LVal.isSimple()) {
1166     if (EvaluationKind == TEK_Aggregate)
1167       return resultSlot.asRValue();
1168 
1169     // Drill into the padding structure if we have one.
1170     if (hasPadding())
1171       addr = CGF.Builder.CreateStructGEP(addr, 0, CharUnits());
1172 
1173     // Otherwise, just convert the temporary to an r-value using the
1174     // normal conversion routine.
1175     return CGF.convertTempToRValue(addr, getValueType(), loc);
1176   }
1177   if (!asValue)
1178     // Get RValue from temp memory as atomic for non-simple lvalues
1179     return RValue::get(CGF.Builder.CreateLoad(addr));
1180   if (LVal.isBitField())
1181     return CGF.EmitLoadOfBitfieldLValue(
1182         LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
1183                              LVal.getAlignmentSource()));
1184   if (LVal.isVectorElt())
1185     return CGF.EmitLoadOfLValue(
1186         LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
1187                               LVal.getAlignmentSource()), loc);
1188   assert(LVal.isExtVectorElt());
1189   return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
1190       addr, LVal.getExtVectorElts(), LVal.getType(),
1191       LVal.getAlignmentSource()));
1192 }
1193 
1194 RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
1195                                              AggValueSlot ResultSlot,
1196                                              SourceLocation Loc,
1197                                              bool AsValue) const {
1198   // Try not to in some easy cases.
1199   assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
1200   if (getEvaluationKind() == TEK_Scalar &&
1201       (((!LVal.isBitField() ||
1202          LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
1203         !hasPadding()) ||
1204        !AsValue)) {
1205     auto *ValTy = AsValue
1206                       ? CGF.ConvertTypeForMem(ValueTy)
1207                       : getAtomicAddress().getType()->getPointerElementType();
1208     if (ValTy->isIntegerTy()) {
1209       assert(IntVal->getType() == ValTy && "Different integer types.");
1210       return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
1211     } else if (ValTy->isPointerTy())
1212       return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
1213     else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
1214       return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
1215   }
1216 
1217   // Create a temporary.  This needs to be big enough to hold the
1218   // atomic integer.
1219   Address Temp = Address::invalid();
1220   bool TempIsVolatile = false;
1221   if (AsValue && getEvaluationKind() == TEK_Aggregate) {
1222     assert(!ResultSlot.isIgnored());
1223     Temp = ResultSlot.getAddress();
1224     TempIsVolatile = ResultSlot.isVolatile();
1225   } else {
1226     Temp = CreateTempAlloca();
1227   }
1228 
1229   // Slam the integer into the temporary.
1230   Address CastTemp = emitCastToAtomicIntPointer(Temp);
1231   CGF.Builder.CreateStore(IntVal, CastTemp)
1232       ->setVolatile(TempIsVolatile);
1233 
1234   return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
1235 }
1236 
1237 void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
1238                                        llvm::AtomicOrdering AO, bool) {
1239   // void __atomic_load(size_t size, void *mem, void *return, int order);
1240   CallArgList Args;
1241   Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1242   Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
1243            CGF.getContext().VoidPtrTy);
1244   Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddForLoaded)),
           CGF.getContext().VoidPtrTy);
  Args.add(
      RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
      CGF.getContext().IntTy);
  emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
}

llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
                                          bool IsVolatile) {
  // Okay, we're doing this natively.
  Address Addr = getAtomicAddressAsAtomicIntPointer();
  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
  Load->setAtomic(AO);

  // Other decoration.
  if (IsVolatile)
    Load->setVolatile(true);
  if (LVal.getTBAAInfo())
    CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
  return Load;
}

/// An LValue is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* the LValue itself is volatile and
/// the operation can be performed without a libcall.
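///
/// For example (illustrative, not exhaustive): under /volatile:ms a volatile
/// int access becomes atomic, while a volatile aggregate wider than a pointer
/// is rejected below and keeps its ordinary non-atomic access path.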
bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
  if (!CGM.getCodeGenOpts().MSVolatile) return false;
  AtomicInfo AI(*this, LV);
  bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
  // An atomic is inline if we don't need to use a libcall.
  bool AtomicIsInline = !AI.shouldUseLibcall();
  // MSVC doesn't seem to do this for types wider than a pointer.
  if (getContext().getTypeSize(LV.getType()) >
      getContext().getTypeSize(getContext().getIntPtrType()))
    return false;
  return IsVolatile && AtomicIsInline;
}

RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
                                       AggValueSlot Slot) {
  llvm::AtomicOrdering AO;
  bool IsVolatile = LV.isVolatileQualified();
  if (LV.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
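    // Otherwise this is a /volatile:ms access (see
    // LValueIsSuitableForInlineAtomic); treat the volatile load as acquire.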
    AO = llvm::AtomicOrdering::Acquire;
    IsVolatile = true;
  }
  return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
}

RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                                  bool AsValue, llvm::AtomicOrdering AO,
                                  bool IsVolatile) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    Address TempAddr = Address::invalid();
    if (LVal.isSimple() && !ResultSlot.isIgnored()) {
      assert(getEvaluationKind() == TEK_Aggregate);
      TempAddr = ResultSlot.getAddress();
    } else
      TempAddr = CreateTempAlloca();

    EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);

    // Okay, turn that back into a value of the original type (or the whole
    // atomic type, for non-simple lvalues).
    return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
  }

  // Okay, we're doing this natively.
  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);

  // If we're ignoring an aggregate return, don't do anything.
  if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
    return RValue::getAggregate(Address::invalid(), false);

  // Okay, turn that back into a value of the original type (or the whole
  // atomic type, for non-simple lvalues).
  return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
}

/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       llvm::AtomicOrdering AO, bool IsVolatile,
                                       AggValueSlot resultSlot) {
  AtomicInfo Atomics(*this, src);
  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
                                IsVolatile);
}

/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(LVal.isSimple());
  // If we have an aggregate r-value, it should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding.  Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    CGF.EmitAggregateCopy(getAtomicAddress(),
                          rvalue.getAggregateAddress(),
                          getAtomicType(),
                          (rvalue.isVolatileQualified()
                           || LVal.isVolatileQualified()));
    return;
  }

  // Okay, otherwise we're copying a scalar or complex value into the buffer.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  LValue TempLVal = projectValue();

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
  }
}

/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
Address AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddress();

  // Otherwise, make a temporary and materialize into it.
  LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
  AtomicInfo Atomics(CGF, TempLV);
  Atomics.emitCopyIntoMemory(rvalue);
  return TempLV.getAddress();
}

llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
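  // (Illustrative cases handled below: an integer scalar is converted with
  // EmitToMemory, a pointer needs only a ptrtoint, and a same-width float
  // needs only a bitcast to the integer type.)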
  if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
    llvm::Value *Value = RVal.getScalarVal();
    if (isa<llvm::IntegerType>(Value->getType()))
      return CGF.EmitToMemory(Value, ValueTy);
    else {
      llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
          CGF.getLLVMContext(),
          LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
      if (isa<llvm::PointerType>(Value->getType()))
        return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
      else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
        return CGF.Builder.CreateBitCast(Value, InputIntTy);
    }
  }
  // Otherwise, we need to go through memory.
  // Put the r-value in memory.
  Address Addr = materializeRValue(RVal);

  // Cast the temporary to the atomic int type and pull a value out.
  Addr = emitCastToAtomicIntPointer(Addr);
  return CGF.Builder.CreateLoad(Addr);
}

std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
    llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
  // Do the atomic compare-and-exchange.
  Address Addr = getAtomicAddressAsAtomicIntPointer();
  auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
                                               ExpectedVal, DesiredVal,
                                               Success, Failure);
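  // cmpxchg yields a { iN, i1 } pair: the value observed in memory and a
  // flag indicating whether the exchange succeeded.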
  // Other decoration.
  Inst->setVolatile(LVal.isVolatileQualified());
  Inst->setWeak(IsWeak);

  // Okay, turn that back into the original value type.
  auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
  auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
  return std::make_pair(PreviousVal, SuccessFailureVal);
}

llvm::Value *
AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
                                             llvm::Value *DesiredAddr,
                                             llvm::AtomicOrdering Success,
                                             llvm::AtomicOrdering Failure) {
  // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
  // void *desired, int success, int failure);
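  //
  // For illustration only: an 8-byte seq_cst/acquire exchange lowers to
  // roughly
  //   call zeroext i1 @__atomic_compare_exchange(i64 8, i8* %obj,
  //       i8* %expected, i8* %desired, i32 5, i32 2)
  // where 5 and 2 are the C ABI encodings of seq_cst and acquire.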
  CallArgList Args;
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(
               llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
           CGF.getContext().IntTy);
  Args.add(RValue::get(
               llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
           CGF.getContext().IntTy);
  auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
                                              CGF.getContext().BoolTy, Args);

  return SuccessFailureRVal.getScalarVal();
}

std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
    RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
    llvm::AtomicOrdering Failure, bool IsWeak) {
  if (isStrongerThan(Failure, Success))
    // Don't assert on undefined behavior; the failure argument "shall be no
    // stronger than the success argument", so clamp it instead.
    Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);

  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    // Produce source addresses.
    Address ExpectedAddr = materializeRValue(Expected);
    Address DesiredAddr = materializeRValue(Desired);
    auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                                 DesiredAddr.getPointer(),
                                                 Success, Failure);
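    // On failure, the runtime has written the value it observed back into
    // ExpectedAddr, so converting that buffer back to an RValue yields the
    // previously held value either way.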
    return std::make_pair(
        convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
                                  SourceLocation(), /*AsValue=*/false),
        Res);
  }

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  auto *ExpectedVal = convertRValueToInt(Expected);
  auto *DesiredVal = convertRValueToInt(Desired);
  auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
                                         Failure, IsWeak);
  return std::make_pair(
      ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
                                SourceLocation(), /*AsValue=*/false),
      Res.second);
}

static void
EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
                      const llvm::function_ref<RValue(RValue)> &UpdateOp,
                      Address DesiredAddr) {
  RValue UpRVal;
  LValue AtomicLVal = Atomics.getAtomicLValue();
  LValue DesiredLVal;
  if (AtomicLVal.isSimple()) {
    UpRVal = OldRVal;
    DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
  } else {
    // Build a new lvalue for the temp address.
    Address Ptr = Atomics.materializeRValue(OldRVal);
    LValue UpdateLVal;
    if (AtomicLVal.isBitField()) {
      UpdateLVal =
          LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
                               AtomicLVal.getType(),
                               AtomicLVal.getAlignmentSource());
      DesiredLVal =
          LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
                               AtomicLVal.getType(),
                               AtomicLVal.getAlignmentSource());
    } else if (AtomicLVal.isVectorElt()) {
      UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
                                         AtomicLVal.getType(),
                                         AtomicLVal.getAlignmentSource());
      DesiredLVal = LValue::MakeVectorElt(
          DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
          AtomicLVal.getAlignmentSource());
    } else {
      assert(AtomicLVal.isExtVectorElt());
      UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
                                            AtomicLVal.getType(),
                                            AtomicLVal.getAlignmentSource());
      DesiredLVal = LValue::MakeExtVectorElt(
          DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
          AtomicLVal.getAlignmentSource());
    }
    UpdateLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
    DesiredLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
    UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
  }
  // Store the new value in the corresponding memory area.
  RValue NewRVal = UpdateOp(UpRVal);
  if (NewRVal.isScalar()) {
    CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
  } else {
    assert(NewRVal.isComplex());
    CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
                           /*isInit=*/false);
  }
}

void AtomicInfo::EmitAtomicUpdateLibcall(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
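  // From here on we emit a retry loop: apply UpdateOp to the value in
  // ExpectedAddr, write the result to DesiredAddr, and try to publish it
  // with __atomic_compare_exchange; a failed exchange refreshes ExpectedAddr
  // with the newly observed value, so the loop simply retries.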
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
  }
  auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
                                           AggValueSlot::ignored(),
                                           SourceLocation(), /*AsValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                       DesiredAddr.getPointer(),
                                       AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

void AtomicInfo::EmitAtomicUpdateOp(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
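  // Roughly, this emits the classic compare-and-swap loop (a sketch; names
  // are illustrative):
  //   %old = load atomic ...
  //   br label %atomic_cont
  // atomic_cont:
  //   %cur = phi [ %old, %entry ], [ %prev, %atomic_cont ]
  //   ... materialize UpdateOp(%cur) as %desired ...
  //   %pair = cmpxchg %addr, %cur, %desired
  //   %prev = extractvalue %pair, 0
  //   %ok = extractvalue %pair, 1
  //   br i1 %ok, label %atomic_exit, label %atomic_cont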
  // For non-simple lvalues, perform the compare-and-swap procedure.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
                                           SourceLocation(), /*AsValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
  // Try to write the new value using a cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
                                  RValue UpdateRVal, Address DesiredAddr) {
  LValue AtomicLVal = Atomics.getAtomicLValue();
  LValue DesiredLVal;
  // Build a new lvalue for the temp address.
  if (AtomicLVal.isBitField()) {
    DesiredLVal =
        LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
                             AtomicLVal.getType(),
                             AtomicLVal.getAlignmentSource());
  } else if (AtomicLVal.isVectorElt()) {
    DesiredLVal =
        LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
                              AtomicLVal.getType(),
                              AtomicLVal.getAlignmentSource());
  } else {
    assert(AtomicLVal.isExtVectorElt());
    DesiredLVal = LValue::MakeExtVectorElt(
        DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
        AtomicLVal.getAlignmentSource());
  }
  DesiredLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
  // Store the new value in the corresponding memory area.
  assert(UpdateRVal.isScalar());
  CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
}

void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                                         RValue UpdateRVal, bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                       DesiredAddr.getPointer(),
                                       AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
  // For non-simple lvalues, perform the compare-and-swap procedure.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
  // Try to write the new value using a cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

void AtomicInfo::EmitAtomicUpdate(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
  }
}

void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                  bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
  }
}

void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
                                      bool isInit) {
  bool IsVolatile = lvalue.isVolatileQualified();
  llvm::AtomicOrdering AO;
  if (lvalue.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
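    // Otherwise this is a /volatile:ms access (see
    // LValueIsSuitableForInlineAtomic); treat the volatile store as release.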
    AO = llvm::AtomicOrdering::Release;
    IsVolatile = true;
  }
  return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
}

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
                                      llvm::AtomicOrdering AO, bool IsVolatile,
                                      bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddress().getElementType()
           == dest.getAddress().getElementType());

  AtomicInfo atomics(*this, dest);
  LValue LVal = atomics.getAtomicLValue();

  // If this is an initialization, just put the value there normally.
  if (LVal.isSimple()) {
    if (isInit) {
      atomics.emitCopyIntoMemory(rvalue);
      return;
    }

    // Check whether we should use a library call.
    if (atomics.shouldUseLibcall()) {
      // Produce a source address.
      Address srcAddr = atomics.materializeRValue(rvalue);

      // void __atomic_store(size_t size, void *mem, void *val, int order)
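      //
      // For illustration only: a 16-byte seq_cst store lowers to roughly
      //   call void @__atomic_store(i64 16, i8* %obj, i8* %src, i32 5)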
      CallArgList args;
      args.add(RValue::get(atomics.getAtomicSizeValue()),
               getContext().getSizeType());
      args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())),
               getContext().VoidPtrTy);
      args.add(RValue::get(EmitCastToVoidPtr(srcAddr.getPointer())),
               getContext().VoidPtrTy);
      args.add(
          RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
          getContext().IntTy);
      emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
      return;
    }

    // Okay, we're doing this natively.
    llvm::Value *intValue = atomics.convertRValueToInt(rvalue);

    // Do the atomic store.
    Address addr =
        atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
    intValue = Builder.CreateIntCast(
        intValue, addr.getElementType(), /*isSigned=*/false);
    llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

    // Initializations don't need to be atomic.
    if (!isInit)
      store->setAtomic(AO);

    // Other decoration.
    if (IsVolatile)
      store->setVolatile(true);
    if (dest.getTBAAInfo())
      CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
    return;
  }

  // Emit a simple atomic update operation.
  atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
}

/// Emit a compare-and-exchange op for an atomic type.
std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
    AggValueSlot Slot) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!Expected.isAggregate() ||
         Expected.getAggregateAddress().getElementType() ==
             Obj.getAddress().getElementType());
  assert(!Desired.isAggregate() ||
         Desired.getAggregateAddress().getElementType() ==
             Obj.getAddress().getElementType());
  AtomicInfo Atomics(*this, Obj);

  return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
                                           IsWeak);
}

void CodeGenFunction::EmitAtomicUpdate(
    LValue LVal, llvm::AtomicOrdering AO,
    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
  AtomicInfo Atomics(*this, LVal);
  Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
}

void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value));
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value));
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
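    // (E.g. `_Atomic(S) s = (S){...};`, where the initializer has type S
    // rather than _Atomic(S); illustrative only.)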
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(dest,
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        Zeroed ? AggValueSlot::IsZeroed :
                                                 AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}